From 4903ccf159552319a36708069c8eb52ce53542dd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 21 Apr 2017 00:46:54 +0100 Subject: Fix some lies, and other clarifications, in docstrings The documentation on get_json has been wrong ever since the very first commit to synapse... --- synapse/federation/federation_client.py | 50 +++++++++++++++++++++++++++++++-- synapse/federation/transport/client.py | 20 +++++++++++++ synapse/http/matrixfederationclient.py | 21 +++++++++----- 3 files changed, 82 insertions(+), 9 deletions(-) (limited to 'synapse') diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index deee0f4904..861441708b 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -474,8 +474,13 @@ class FederationClient(FederationBase): content (object): Any additional data to put into the content field of the event. Return: - A tuple of (origin (str), event (object)) where origin is the remote - homeserver which generated the event. + Deferred: resolves to a tuple of (origin (str), event (object)) + where origin is the remote homeserver which generated the event. + + Fails with a ``CodeMessageException`` if the chosen remote server + returns a 300/400 code. + + Fails with a ``RuntimeError`` if no servers were reachable. """ valid_memberships = {Membership.JOIN, Membership.LEAVE} if membership not in valid_memberships: @@ -528,6 +533,27 @@ class FederationClient(FederationBase): @defer.inlineCallbacks def send_join(self, destinations, pdu): + """Sends a join event to one of a list of homeservers. + + Doing so will cause the remote server to add the event to the graph, + and send the event out to the rest of the federation. + + Args: + destinations (str): Candidate homeservers which are probably + participating in the room. + pdu (BaseEvent): event to be sent + + Return: + Deferred: resolves to a dict with members ``origin`` (a string + giving the serer the event was sent to, ``state`` (?) and + ``auth_chain``. + + Fails with a ``CodeMessageException`` if the chosen remote server + returns a 300/400 code. + + Fails with a ``RuntimeError`` if no servers were reachable. + """ + for destination in destinations: if destination == self.server_name: continue @@ -635,6 +661,26 @@ class FederationClient(FederationBase): @defer.inlineCallbacks def send_leave(self, destinations, pdu): + """Sends a leave event to one of a list of homeservers. + + Doing so will cause the remote server to add the event to the graph, + and send the event out to the rest of the federation. + + This is mostly useful to reject received invites. + + Args: + destinations (str): Candidate homeservers which are probably + participating in the room. + pdu (BaseEvent): event to be sent + + Return: + Deferred: resolves to None. + + Fails with a ``CodeMessageException`` if the chosen remote server + returns a non-200 code. + + Fails with a ``RuntimeError`` if no servers were reachable. 
+ """ for destination in destinations: if destination == self.server_name: continue diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 15a03378f5..8887c624da 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -193,6 +193,26 @@ class TransportLayerClient(object): @defer.inlineCallbacks @log_function def make_membership_event(self, destination, room_id, user_id, membership): + """Asks a remote server to build and sign us a membership event + + Note that this does not append any events to any graphs. + + Args: + destination (str): address of remote homeserver + room_id (str): room to join/leave + user_id (str): user to be joined/left + membership (str): one of join/leave + + Returns: + Deferred: Succeeds when we get a 2xx HTTP response. The result + will be the decoded JSON body (ie, the new event). + + Fails with ``HTTPRequestException`` if we get an HTTP response + code >= 300. + + Fails with ``NotRetryingDestination`` if we are not yet ready + to retry this server. + """ valid_memberships = {Membership.JOIN, Membership.LEAVE} if membership not in valid_memberships: raise RuntimeError( diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 62b4d7e93d..747a791f83 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -125,6 +125,8 @@ class MatrixFederationHttpClient(object): code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. + (May also fail with plenty of other Exceptions for things like DNS + failures, connection failures, SSL failures.) """ limiter = yield synapse.util.retryutils.get_retry_limiter( destination, @@ -302,8 +304,10 @@ class MatrixFederationHttpClient(object): Returns: Deferred: Succeeds when we get a 2xx HTTP response. The result - will be the decoded JSON body. On a 4xx or 5xx error response a - CodeMessageException is raised. + will be the decoded JSON body. + + Fails with ``HTTPRequestException`` if we get an HTTP response + code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. @@ -360,8 +364,10 @@ class MatrixFederationHttpClient(object): try the request anyway. Returns: Deferred: Succeeds when we get a 2xx HTTP response. The result - will be the decoded JSON body. On a 4xx or 5xx error response a - CodeMessageException is raised. + will be the decoded JSON body. + + Fails with ``HTTPRequestException`` if we get an HTTP response + code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. @@ -410,10 +416,11 @@ class MatrixFederationHttpClient(object): ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. Returns: - Deferred: Succeeds when we get *any* HTTP response. + Deferred: Succeeds when we get a 2xx HTTP response. The result + will be the decoded JSON body. - The result of the deferred is a tuple of `(code, response)`, - where `response` is a dict representing the decoded JSON body. + Fails with ``HTTPRequestException`` if we get an HTTP response + code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. -- cgit 1.4.1 From 736b9a478478f82ac27a9a6a4899aed66b0b64a8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 21 Apr 2017 00:51:01 +0100 Subject: Remove redundant function inline `reject_remote_invite`, which only existed to make tracing the callflow more difficult. 
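Editor's note: the docstrings added in the patch above all describe the same failure contract: try each candidate homeserver in turn, surface an error response from a server to the caller, and raise a ``RuntimeError`` only once every destination has proved unreachable. The following is a minimal, framework-free sketch of that contract rather than Synapse's actual implementation; ``query_destination`` is a hypothetical callable and the stub exception merely stands in for ``synapse.api.errors.CodeMessageException``.

    class CodeMessageException(Exception):
        """Stand-in for synapse.api.errors.CodeMessageException: the remote
        server answered, but with an HTTP error code."""
        def __init__(self, code, msg):
            super(CodeMessageException, self).__init__("%d: %s" % (code, msg))
            self.code = code
            self.msg = msg


    def try_destinations(destinations, query_destination):
        """Ask each candidate homeserver in turn until one gives an answer."""
        for destination in destinations:
            try:
                return destination, query_destination(destination)
            except CodeMessageException as e:
                if 300 <= e.code < 500:
                    # The server understood the request and refused it, so
                    # asking a different server will not change the answer.
                    raise
                # A 5xx is treated like any other transient failure: fall
                # through and try the next candidate.
            except Exception:
                # DNS failure, connection refused, timeout, ... - move on.
                pass
        raise RuntimeError("no servers were reachable")

In the real methods each query is a Twisted Deferred rather than a blocking call, but the overall shape - propagate the remote error, otherwise keep trying, then give up with ``RuntimeError`` - is the one the docstrings now spell out.
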
--- synapse/handlers/room_member.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 2052d6d05f..08fc6d6783 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -139,13 +139,6 @@ class RoomMemberHandler(BaseHandler): ) yield user_joined_room(self.distributor, user, room_id) - def reject_remote_invite(self, user_id, room_id, remote_room_hosts): - return self.hs.get_handlers().federation_handler.do_remotely_reject_invite( - remote_room_hosts, - room_id, - user_id - ) - @defer.inlineCallbacks def update_membership( self, @@ -286,10 +279,12 @@ class RoomMemberHandler(BaseHandler): else: # send the rejection to the inviter's HS. remote_room_hosts = remote_room_hosts + [inviter.domain] - + fed_handler = self.hs.get_handlers().federation_handler try: - ret = yield self.reject_remote_invite( - target.to_string(), room_id, remote_room_hosts + ret = yield fed_handler.do_remotely_reject_invite( + remote_room_hosts, + room_id, + target.to_string(), ) defer.returnValue(ret) except SynapseError as e: -- cgit 1.4.1 From 838810b76af338ecd5d2a34409b4e6ca78daaddb Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 21 Apr 2017 01:22:07 +0100 Subject: Broaden the conditions for locally_rejecting invites The logic for marking invites as locally rejected was all well and good, but didn't happen when the remote server returned a 500, or wasn't reachable, or had no DNS, or whatever. Just expand the except clause to catch everything. Fixes https://github.com/matrix-org/synapse/issues/761. --- synapse/handlers/room_member.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 08fc6d6783..28b2c80a93 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -287,7 +287,13 @@ class RoomMemberHandler(BaseHandler): target.to_string(), ) defer.returnValue(ret) - except SynapseError as e: + except Exception as e: + # if we were unable to reject the exception, just mark + # it as rejected on our end and plough ahead. + # + # The 'except' clause is very broad, but we need to + # capture everything from DNS failures upwards + # logger.warn("Failed to reject invite: %s", e) yield self.store.locally_reject_invite( -- cgit 1.4.1 From 0cdb32fc43aa2edbdd60fc9f092632a5edf8b348 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 21 Apr 2017 01:24:17 +0100 Subject: Remove redundant try/except clauses The `except SynapseError` clauses were pointless because the wrapped functions would never throw a `SynapseError` (they either throw a `CodeMessageException` or a `RuntimeError`). The `except CodeMessageException` is now also pointless because the caller treats all exceptions equally, so we may as well just throw the `CodeMessageException`. 
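Editor's note: both of the last two patches lean on a single, deliberately broad handler at the call site in ``room_member.py``: if the federated rejection fails for any reason at all, the invite is simply marked as rejected locally. Below is a condensed sketch of that fallback; the method signatures are abbreviated from the diffs and the empty return value is illustrative, so treat it as an outline rather than the handler's real code.

    import logging

    from twisted.internet import defer

    logger = logging.getLogger(__name__)


    @defer.inlineCallbacks
    def reject_invite(fed_handler, store, target_user_id, room_id, remote_room_hosts):
        """Prefer a federated rejection, but never let a remote failure stop
        the local user from getting rid of the invite."""
        try:
            ret = yield fed_handler.do_remotely_reject_invite(
                remote_room_hosts, room_id, target_user_id,
            )
        except Exception as e:
            # Deliberately broad: a 500 from the remote server, a DNS failure
            # or a refused connection should all land here, which is why the
            # narrower try/except blocks further down the stack are redundant.
            logger.warn("Failed to reject invite: %s", e)
            yield store.locally_reject_invite(target_user_id, room_id)
            ret = {}
        defer.returnValue(ret)
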
--- synapse/handlers/federation.py | 34 +++++++++++----------------------- 1 file changed, 11 insertions(+), 23 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2d9126dd86..52be5a402d 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1090,19 +1090,13 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks def do_remotely_reject_invite(self, target_hosts, room_id, user_id): - try: - origin, event = yield self._make_and_verify_event( - target_hosts, - room_id, - user_id, - "leave" - ) - event = self._sign_event(event) - except SynapseError: - raise - except CodeMessageException as e: - logger.warn("Failed to reject invite: %s", e) - raise SynapseError(500, "Failed to reject invite") + origin, event = yield self._make_and_verify_event( + target_hosts, + room_id, + user_id, + "leave" + ) + event = self._sign_event(event) # Try the host that we succesfully called /make_leave/ on first for # the /send_leave/ request. @@ -1112,16 +1106,10 @@ class FederationHandler(BaseHandler): except ValueError: pass - try: - yield self.replication_layer.send_leave( - target_hosts, - event - ) - except SynapseError: - raise - except CodeMessageException as e: - logger.warn("Failed to reject invite: %s", e) - raise SynapseError(500, "Failed to reject invite") + yield self.replication_layer.send_leave( + target_hosts, + event + ) context = yield self.state_handler.compute_event_context(event) -- cgit 1.4.1 From 91b39818007e955b15407c3dd9fd2d310d08842b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 21 Apr 2017 01:50:36 +0100 Subject: Try harder when sending leave events When we're rejecting invites, ignore the backoff data, so that we have a better chance of not getting the room out of sync. --- synapse/federation/transport/client.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 8887c624da..52b2a717d2 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -221,11 +221,23 @@ class TransportLayerClient(object): ) path = PREFIX + "/make_%s/%s/%s" % (membership, room_id, user_id) + ignore_backoff = False + retry_on_dns_fail = False + + if membership == Membership.LEAVE: + # we particularly want to do our best to send leave events. The + # problem is that if it fails, we won't retry it later, so if the + # remote server was just having a momentary blip, the room will be + # out of sync. + ignore_backoff = True + retry_on_dns_fail = True + content = yield self.client.get_json( destination=destination, path=path, - retry_on_dns_fail=False, + retry_on_dns_fail=retry_on_dns_fail, timeout=20000, + ignore_backoff=ignore_backoff, ) defer.returnValue(content) @@ -252,6 +264,12 @@ class TransportLayerClient(object): destination=destination, path=path, data=content, + + # we want to do our best to send this through. The problem is + # that if it fails, we won't retry it later, so if the remote + # server was just having a momentary blip, the room will be out of + # sync. 
+ ignore_backoff=True, ) defer.returnValue(response) -- cgit 1.4.1 From e4f34311166cc93ad26fc12969867e1db30b4a30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 24 Apr 2017 13:27:38 +0100 Subject: Remove unused cache --- synapse/replication/slave/storage/events.py | 3 --- synapse/storage/state.py | 7 +------ 2 files changed, 1 insertion(+), 9 deletions(-) (limited to 'synapse') diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index 4ca1e5aa8c..ab48ff925e 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -102,9 +102,6 @@ class SlavedEventStore(BaseSlavedStore): _get_state_groups_from_groups_txn = ( DataStore._get_state_groups_from_groups_txn.__func__ ) - _get_state_group_from_group = ( - StateStore.__dict__["_get_state_group_from_group"] - ) get_recent_event_ids_for_room = ( StreamStore.__dict__["get_recent_event_ids_for_room"] ) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index acd69944c4..927a936013 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -279,12 +279,7 @@ class StateStore(SQLBaseStore): return count - @cached(num_args=2, max_entries=100000, iterable=True) - def _get_state_group_from_group(self, group, types): - raise NotImplementedError() - - @cachedList(cached_method_name="_get_state_group_from_group", - list_name="groups", num_args=2, inlineCallbacks=True) + @defer.inlineCallbacks def _get_state_groups_from_groups(self, groups, types): """Returns dictionary state_group -> (dict of (type, state_key) -> event id) """ -- cgit 1.4.1 From d134d0935e20add0dca69a3e0787e9629763ac78 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 24 Apr 2017 14:07:27 +0100 Subject: Only intern ascii strings --- synapse/util/caches/__init__.py | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) (limited to 'synapse') diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 8a7774a88e..9fd35a8134 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -14,7 +14,6 @@ # limitations under the License. 
import synapse.metrics -from lrucache import LruCache import os CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1)) @@ -40,10 +39,6 @@ def register_cache(name, cache): ) -_string_cache = LruCache(int(100000 * CACHE_SIZE_FACTOR)) -_stirng_cache_metrics = register_cache("string_cache", _string_cache) - - KNOWN_KEYS = { key: key for key in ( @@ -67,14 +62,16 @@ KNOWN_KEYS = { def intern_string(string): - """Takes a (potentially) unicode string and interns using custom cache + """Takes a (potentially) unicode string and interns it if it's ascii """ - new_str = _string_cache.setdefault(string, string) - if new_str is string: - _stirng_cache_metrics.inc_hits() - else: - _stirng_cache_metrics.inc_misses() - return new_str + if string is None: + return None + + try: + string = string.encode("ascii") + return intern(string) + except UnicodeEncodeError: + return string def intern_dict(dictionary): @@ -87,13 +84,9 @@ def intern_dict(dictionary): def _intern_known_values(key, value): - intern_str_keys = ("event_id", "room_id") - intern_unicode_keys = ("sender", "user_id", "type", "state_key") - - if key in intern_str_keys: - return intern(value.encode('ascii')) + intern_keys = ("event_id", "room_id", "sender", "user_id", "type", "state_key",) - if key in intern_unicode_keys: + if key in intern_keys: return intern_string(value) return value -- cgit 1.4.1 From e6e262763663a7aebd3e38ee9763faeff64459c8 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 24 Apr 2017 18:51:25 +0100 Subject: Fix code for reporting old verify keys in synapse --- synapse/rest/key/v2/local_key_resource.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py index ff95269ba8..be68d9a096 100644 --- a/synapse/rest/key/v2/local_key_resource.py +++ b/synapse/rest/key/v2/local_key_resource.py @@ -84,12 +84,11 @@ class LocalKey(Resource): } old_verify_keys = {} - for key in self.config.old_signing_keys: - key_id = "%s:%s" % (key.alg, key.version) + for key_id, key in self.config.old_signing_keys.items(): verify_key_bytes = key.encode() old_verify_keys[key_id] = { u"key": encode_base64(verify_key_bytes), - u"expired_ts": key.expired, + u"expired_ts": key.expired_ts, } tls_fingerprints = self.config.tls_fingerprints -- cgit 1.4.1 From 119cb9bbcf429321d27cbc1422049974e2ad8982 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 25 Apr 2017 10:23:11 +0100 Subject: Reduce cache size by not storing deferreds Currently the cache descriptors store deferreds rather than raw values, this is a simple way of triggering only one database hit and sharing the result if two callers attempt to get the same value. However, there are a few caches that simply store a mapping from string to string (or int). These caches can have a large number of entries, under the assumption that each entry is small. However, the size of a deferred (specifically the size of ObservableDeferred) is signigicantly larger than that of the raw value, 2kb vs 32b. This PR therefore changes the cache descriptors to store the raw values rather than the deferreds. As a side effect cached storage function now either return a deferred or the actual value, as the cached list decriptor already does. This is fine as we always end up just yield'ing on the returned value eventually, which handles that case correctly. 
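Editor's note: after this change a cache hit handed back by ``Cache.get`` may be either the plain value or a Deferred (fired or not), so code that reaches into a cache directly has to cope with both shapes. A small illustrative helper, not something that exists in Synapse, makes the idiom explicit:

    from twisted.internet import defer


    def unwrap_cache_hit(res):
        """Normalise a cache entry that may be a raw value or a Deferred.

        Returns the underlying value, or None when there is no entry or the
        Deferred has not fired yet (i.e. the value is still being fetched).
        """
        if res is None:
            return None
        if isinstance(res, defer.Deferred):
            if not res.called:
                # Still pending: treat it as a miss rather than blocking.
                return None
            # Assumes the lookup succeeded; a failed Deferred would leave a
            # Failure in .result instead of the cached value.
            return res.result
        return res

The receipts-store hunk that follows open-codes the same check inline. Values cached before this patch were always ObservableDeferreds, which the commit message puts at roughly 2 kB apiece against 32 bytes for a raw value; storing the fired result directly is where the saving comes from.
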
--- synapse/storage/receipts.py | 11 +++++++---- synapse/util/caches/descriptors.py | 39 ++++++++++++++++++++------------------ 2 files changed, 28 insertions(+), 22 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 6b0f8c2787..efb90c3c91 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -47,10 +47,13 @@ class ReceiptsStore(SQLBaseStore): # Returns an ObservableDeferred res = self.get_users_with_read_receipts_in_room.cache.get((room_id,), None) - if res and res.called and user_id in res.result: - # We'd only be adding to the set, so no point invalidating if the - # user is already there - return + if res: + if isinstance(res, defer.Deferred) and res.called: + res = res.result + if user_id in res: + # We'd only be adding to the set, so no point invalidating if the + # user is already there + return self.get_users_with_read_receipts_in_room.invalidate((room_id,)) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 9d0d0be1f9..807e147657 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -19,7 +19,7 @@ from synapse.util import unwrapFirstError, logcontext from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry -from . import DEBUG_CACHES, register_cache +from . import register_cache from twisted.internet import defer from collections import namedtuple @@ -76,7 +76,7 @@ class Cache(object): self.cache = LruCache( max_size=max_entries, keylen=keylen, cache_type=cache_type, - size_callback=(lambda d: len(d.result)) if iterable else None, + size_callback=(lambda d: len(d)) if iterable else None, ) self.name = name @@ -96,6 +96,17 @@ class Cache(object): ) def get(self, key, default=_CacheSentinel, callback=None): + """Looks the key up in the caches. + + Args: + key(tuple) + default: What is returned if key is not in the caches. 
If not + specified then function throws KeyError instead + callback(fn): Gets called when the entry in the cache is invalidated + + Returns: + Either a Deferred or the raw result + """ callbacks = [callback] if callback else [] val = self._pending_deferred_cache.get(key, _CacheSentinel) if val is not _CacheSentinel: @@ -137,7 +148,7 @@ class Cache(object): if self.sequence == entry.sequence: existing_entry = self._pending_deferred_cache.pop(key, None) if existing_entry is entry: - self.cache.set(key, entry.deferred, entry.callbacks) + self.cache.set(key, result, entry.callbacks) else: entry.invalidate() else: @@ -335,20 +346,10 @@ class CacheDescriptor(_CacheDescriptorBase): try: cached_result_d = cache.get(cache_key, callback=invalidate_callback) - observer = cached_result_d.observe() - if DEBUG_CACHES: - @defer.inlineCallbacks - def check_result(cached_result): - actual_result = yield self.function_to_call(obj, *args, **kwargs) - if actual_result != cached_result: - logger.error( - "Stale cache entry %s%r: cached: %r, actual %r", - self.orig.__name__, cache_key, - cached_result, actual_result, - ) - raise ValueError("Stale cache entry") - defer.returnValue(cached_result) - observer.addCallback(check_result) + if isinstance(cached_result_d, ObservableDeferred): + observer = cached_result_d.observe() + else: + observer = cached_result_d except KeyError: ret = defer.maybeDeferred( @@ -447,7 +448,9 @@ class CacheListDescriptor(_CacheDescriptorBase): try: res = cache.get(tuple(key), callback=invalidate_callback) - if not res.has_succeeded(): + if not isinstance(res, ObservableDeferred): + results[arg] = res + elif not res.has_succeeded(): res = res.observe() res.addCallback(lambda r, arg: (arg, r), arg) cached_defers[arg] = res -- cgit 1.4.1 From efab1dadde8ce6305bf0960bf70aaeef54b42db6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 25 Apr 2017 10:54:09 +0100 Subject: Remove DEBUG_CACHES --- synapse/util/caches/__init__.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'synapse') diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 9fd35a8134..4a83c46d98 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -18,8 +18,6 @@ import os CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1)) -DEBUG_CACHES = False - metrics = synapse.metrics.get_metrics_for("synapse.util.caches") caches_by_name = {} -- cgit 1.4.1 From 22f3d3ae769a031c42cda1fb233dd91e28e585d0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 25 Apr 2017 11:43:03 +0100 Subject: Reduce _get_state_group_for_event cache size --- synapse/storage/state.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 927a936013..e89001d994 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -507,7 +507,7 @@ class StateStore(SQLBaseStore): state_map = yield self.get_state_ids_for_events([event_id], types) defer.returnValue(state_map[event_id]) - @cached(num_args=2, max_entries=100000) + @cached(num_args=2, max_entries=50000) def _get_state_group_for_event(self, room_id, event_id): return self._simple_select_one_onecol( table="event_to_state_groups", -- cgit 1.4.1 From d9aa645f86db733bb0419b5f5428ba9a9c799735 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 25 Apr 2017 14:38:51 +0100 Subject: Reduce size of joined_user cache The _get_joined_users_from_context cache stores a mapping from user_id to avatar_url and display_name. 
Instead of storing those in a dict, store them in a namedtuple as that uses much less memory. We also try converting the string to ascii to further reduce the size. --- synapse/push/bulk_push_rule_evaluator.py | 7 +++++-- synapse/rest/client/v1/room.py | 8 +++++++- synapse/storage/roommember.py | 22 ++++++++++++++-------- synapse/util/stringutils.py | 14 ++++++++++++++ 4 files changed, 40 insertions(+), 11 deletions(-) (limited to 'synapse') diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 78b095c903..503fef4261 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -87,8 +87,11 @@ class BulkPushRuleEvaluator: condition_cache = {} for uid, rules in self.rules_by_user.items(): - display_name = room_members.get(uid, {}).get("display_name", None) - if not display_name: + display_name = None + profile_info = room_members.get(uid, {}) + if profile_info: + display_name = profile_info.display_name + else: # Handle the case where we are pushing a membership event to # that user, as they might not be already joined. if event.type == EventTypes.Member and event.state_key == uid: diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 0bdd6b5b36..c376ab8fd7 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -406,7 +406,13 @@ class JoinedRoomMemberListRestServlet(ClientV1RestServlet): users_with_profile = yield self.state.get_current_user_in_room(room_id) defer.returnValue((200, { - "joined": users_with_profile + "joined": { + user_id: { + "avatar_url": profile.avatar_url, + "display_name": profile.display_name, + } + for user_id, profile in users_with_profile.iteritems() + } })) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 367dbbbcf6..68e724ac7f 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -19,6 +19,7 @@ from collections import namedtuple from ._base import SQLBaseStore from synapse.util.caches.descriptors import cached, cachedInlineCallbacks +from synapse.util.stringutils import to_ascii from synapse.api.constants import Membership, EventTypes from synapse.types import get_domain_from_id @@ -35,6 +36,11 @@ RoomsForUser = namedtuple( ) +ProfileInfo = namedtuple( + "ProfileInfo", ("avatar_url", "display_name") +) + + _MEMBERSHIP_PROFILE_UPDATE_NAME = "room_membership_profile_update" @@ -422,20 +428,20 @@ class RoomMemberStore(SQLBaseStore): ) users_in_room = { - row["user_id"]: { - "display_name": row["display_name"], - "avatar_url": row["avatar_url"], - } + to_ascii(row["user_id"]): ProfileInfo( + avatar_url=to_ascii(row["avatar_url"]), + display_name=to_ascii(row["display_name"]), + ) for row in rows } if event is not None and event.type == EventTypes.Member: if event.membership == Membership.JOIN: if event.event_id in member_event_ids: - users_in_room[event.state_key] = { - "display_name": event.content.get("displayname", None), - "avatar_url": event.content.get("avatar_url", None), - } + users_in_room[to_ascii(event.state_key)] = ProfileInfo( + display_name=to_ascii(event.content.get("displayname", None)), + avatar_url=to_ascii(event.content.get("avatar_url", None)), + ) defer.returnValue(users_in_room) diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index a100f151d4..95a6168e16 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -40,3 +40,17 @@ def is_ascii(s): return False else: return True + + +def to_ascii(s): + 
"""Converts a string to ascii if it is ascii, otherwise leave it alone. + + If given None then will return None. + """ + if s is None: + return None + + try: + return s.encode("ascii") + except UnicodeEncodeError: + return s -- cgit 1.4.1 From f144365281e0c925ea467669cbfc6253e00f10e6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 25 Apr 2017 15:18:26 +0100 Subject: Comment --- synapse/storage/roommember.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'synapse') diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 68e724ac7f..7ad2198d96 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -36,6 +36,8 @@ RoomsForUser = namedtuple( ) +# We store this using a namedtuple so that we save about 3x space over using a +# dict. ProfileInfo = namedtuple( "ProfileInfo", ("avatar_url", "display_name") ) -- cgit 1.4.1 From f7181615f2264edcfd5aa135df127d88a2b16a1f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 25 Apr 2017 15:22:59 +0100 Subject: Don't specify default as dict --- synapse/push/bulk_push_rule_evaluator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 503fef4261..e40f62de78 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -88,7 +88,7 @@ class BulkPushRuleEvaluator: for uid, rules in self.rules_by_user.items(): display_name = None - profile_info = room_members.get(uid, {}) + profile_info = room_members.get(uid) if profile_info: display_name = profile_info.display_name else: -- cgit 1.4.1 From acb58bfb6a3523c67b3d426bb6a25b7329a4604f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 25 Apr 2017 15:39:19 +0100 Subject: fix up --- synapse/push/bulk_push_rule_evaluator.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index e40f62de78..f943ff640f 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -91,7 +91,8 @@ class BulkPushRuleEvaluator: profile_info = room_members.get(uid) if profile_info: display_name = profile_info.display_name - else: + + if not display_name: # Handle the case where we are pushing a membership event to # that user, as they might not be already joined. if event.type == EventTypes.Member and event.state_key == uid: -- cgit 1.4.1
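
Editor's note: the space argument behind ``ProfileInfo`` ("about 3x" per the comment added above) is easy to sanity-check. The snippet below is a self-contained measurement, not Synapse code, and the exact byte counts it prints depend on the Python build; ``sys.getsizeof`` only measures the container, which is the point, since the per-entry hash table of a dict is what the namedtuple avoids (its field names live once on the class). The related ``to_ascii`` conversion is a further saving on Python 2, where an ASCII ``str`` stores one byte per character against two or four for a ``unicode`` object.

    import sys
    from collections import namedtuple

    ProfileInfo = namedtuple("ProfileInfo", ("avatar_url", "display_name"))

    # One cache entry's worth of profile data, stored both ways.
    as_dict = {
        "avatar_url": "mxc://example.org/abcdef",
        "display_name": "Alice",
    }
    as_tuple = ProfileInfo(
        avatar_url="mxc://example.org/abcdef",
        display_name="Alice",
    )

    # The strings themselves are shared either way; only the container differs.
    print("dict:       %d bytes" % sys.getsizeof(as_dict))
    print("namedtuple: %d bytes" % sys.getsizeof(as_tuple))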