diff --git a/docs/workers.rst b/docs/workers.rst
index 4eb05b0e59..65b6e690f7 100644
--- a/docs/workers.rst
+++ b/docs/workers.rst
@@ -42,6 +42,7 @@ The current available worker applications are:
* synapse.app.appservice - handles output traffic to Application Services
* synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API)
* synapse.app.media_repository - handles the media repository.
+ * synapse.app.client_reader - handles client API endpoints like /publicRooms
Each worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that worker,
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 9aaa3bace2..9fccc73db3 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -51,7 +51,7 @@ import sys
import logging
import gc
-logger = logging.getLogger("synapse.app.federation_reader")
+logger = logging.getLogger("synapse.app.client_reader")
class ClientReaderSlavedStore(
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index f15987b265..5a533682c5 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -115,134 +115,76 @@ class RoomListHandler(BaseHandler):
sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
sorted_rooms = [room_id for room_id, _ in sorted_entries]
+ # `sorted_rooms` should now be a list of all public room ids that is
+ # stable across pagination. Therefore, we can use indices into this
+ # list as our pagination tokens.
+
+ # Filter out rooms that we don't want to return
+ rooms_to_scan = [
+ r for r in sorted_rooms
+ if r not in newly_unpublished and rooms_to_num_joined[r] > 0
+ ]
+
if since_token:
+ # Filter out rooms we've already returned.
+ # `since_token.current_limit` is the index of the last room we
+ # sent down, so when paginating forwards we exclude it and everything
+ # before it, and when paginating backwards we exclude it and
+ # everything after it.
if since_token.direction_is_forward:
- sorted_rooms = sorted_rooms[since_token.current_limit:]
+ rooms_to_scan = rooms_to_scan[since_token.current_limit + 1:]
else:
- sorted_rooms = sorted_rooms[:since_token.current_limit]
- sorted_rooms.reverse()
-
- rooms_to_scan = sorted_rooms
- if limit and not search_filter:
- rooms_to_scan = sorted_rooms[:limit + 1]
+ rooms_to_scan = rooms_to_scan[:since_token.current_limit]
+ rooms_to_scan.reverse()
+ # Actually generate the entries. _generate_room_entry will append to
+ # chunk but will bail out early once chunk already holds more than
+ # limit + 1 entries.
chunk = []
-
- @defer.inlineCallbacks
- def handle_room(room_id):
- if limit and len(chunk) > limit + 1:
- # We've already got enough, so lets just drop it.
- return
-
- num_joined_users = rooms_to_num_joined[room_id]
- if num_joined_users == 0:
- return
-
- if room_id in newly_unpublished:
- return
-
- result = {
- "room_id": room_id,
- "num_joined_members": num_joined_users,
- }
-
- current_state_ids = yield self.state_handler.get_current_state_ids(room_id)
-
- event_map = yield self.store.get_events([
- event_id for key, event_id in current_state_ids.items()
- if key[0] in (
- EventTypes.JoinRules,
- EventTypes.Name,
- EventTypes.Topic,
- EventTypes.CanonicalAlias,
- EventTypes.RoomHistoryVisibility,
- EventTypes.GuestAccess,
- "m.room.avatar",
+ if limit and not search_filter:
+ step = limit + 1
+ for i in xrange(0, len(rooms_to_scan), step):
+ # We iterate here because in the vast majority of cases we'll
+ # stop at the first iteration, but occasionally
+ # _generate_room_entry won't append to the chunk (e.g. the room
+ # was filtered out) and so we need to loop again.
+ # We don't want to scan over the entire range up front either, as
+ # that would potentially waste a lot of work.
+ yield concurrently_execute(
+ lambda r: self._generate_room_entry(
+ r, rooms_to_num_joined[r],
+ chunk, limit, search_filter
+ ),
+ rooms_to_scan[i:i + step], 10
)
- ])
-
- current_state = {
- (ev.type, ev.state_key): ev
- for ev in event_map.values()
- }
-
- # Double check that this is actually a public room.
- join_rules_event = current_state.get((EventTypes.JoinRules, ""))
- if join_rules_event:
- join_rule = join_rules_event.content.get("join_rule", None)
- if join_rule and join_rule != JoinRules.PUBLIC:
- defer.returnValue(None)
-
- aliases = yield self.store.get_aliases_for_room(room_id)
- if aliases:
- result["aliases"] = aliases
-
- name_event = yield current_state.get((EventTypes.Name, ""))
- if name_event:
- name = name_event.content.get("name", None)
- if name:
- result["name"] = name
-
- topic_event = current_state.get((EventTypes.Topic, ""))
- if topic_event:
- topic = topic_event.content.get("topic", None)
- if topic:
- result["topic"] = topic
-
- canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
- if canonical_event:
- canonical_alias = canonical_event.content.get("alias", None)
- if canonical_alias:
- result["canonical_alias"] = canonical_alias
-
- visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
- visibility = None
- if visibility_event:
- visibility = visibility_event.content.get("history_visibility", None)
- result["world_readable"] = visibility == "world_readable"
-
- guest_event = current_state.get((EventTypes.GuestAccess, ""))
- guest = None
- if guest_event:
- guest = guest_event.content.get("guest_access", None)
- result["guest_can_join"] = guest == "can_join"
-
- avatar_event = current_state.get(("m.room.avatar", ""))
- if avatar_event:
- avatar_url = avatar_event.content.get("url", None)
- if avatar_url:
- result["avatar_url"] = avatar_url
-
- if _matches_room_entry(result, search_filter):
- chunk.append(result)
-
- yield concurrently_execute(handle_room, rooms_to_scan, 10)
+ if len(chunk) >= limit + 1:
+ break
+ else:
+ yield concurrently_execute(
+ lambda r: self._generate_room_entry(
+ r, rooms_to_num_joined[r],
+ chunk, limit, search_filter
+ ),
+ rooms_to_scan, 5
+ )
chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))
# Work out the new limit of the batch for pagination, or None if we
# know there are no more results that would be returned.
+ # i.e., [since_token.current_limit..new_limit] is the batch of rooms
+ # we've returned (or the reverse if we paginated backwards).
+ # We tried to pull out limit + 1 rooms above, so if we have <= limit
+ # entries then we know there are no more results to return.
new_limit = None
if chunk and (not limit or len(chunk) > limit):
- if limit:
- chunk = chunk[:limit]
-
- addition = 1
- if since_token:
- addition += since_token.current_limit
if not since_token or since_token.direction_is_forward:
+ if limit:
+ chunk = chunk[:limit]
last_room_id = chunk[-1]["room_id"]
else:
+ if limit:
+ chunk = chunk[-limit:]
last_room_id = chunk[0]["room_id"]
- addition *= -1
- try:
- new_limit = sorted_rooms.index(last_room_id) + addition
- if new_limit >= len(sorted_rooms):
- new_limit = None
- except ValueError:
- pass
+ new_limit = sorted_rooms.index(last_room_id)
results = {
"chunk": chunk,
@@ -252,7 +194,7 @@ class RoomListHandler(BaseHandler):
results["new_rooms"] = bool(newly_visible)
if not since_token or since_token.direction_is_forward:
- if new_limit:
+ if new_limit is not None:
results["next_batch"] = RoomListNextBatch(
stream_ordering=stream_token,
public_room_stream_id=public_room_stream_id,
@@ -263,9 +205,10 @@ class RoomListHandler(BaseHandler):
if since_token:
results["prev_batch"] = since_token.copy_and_replace(
direction_is_forward=False,
+ current_limit=since_token.current_limit + 1,
).to_token()
else:
- if new_limit:
+ if new_limit is not None:
results["prev_batch"] = RoomListNextBatch(
stream_ordering=stream_token,
public_room_stream_id=public_room_stream_id,
@@ -276,11 +219,94 @@ class RoomListHandler(BaseHandler):
if since_token:
results["next_batch"] = since_token.copy_and_replace(
direction_is_forward=True,
+ current_limit=since_token.current_limit - 1,
).to_token()
defer.returnValue(results)
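
To make the index-token scheme above concrete, here is a minimal,
self-contained sketch (illustrative only, not Synapse code; the backwards
direction is simplified): pagination tokens are indices into the stable
sorted list, and each request over-fetches by one entry to detect whether
another page exists.

    def paginate(sorted_rooms, limit, token=None, forwards=True):
        # `token` is the index of the last room previously returned; the
        # indices stay valid because `sorted_rooms` is stable across
        # requests.
        if token is None:
            candidates = sorted_rooms
        elif forwards:
            candidates = sorted_rooms[token + 1:]
        else:
            candidates = sorted_rooms[:token][::-1]

        chunk = candidates[:limit + 1]  # one extra to detect more results
        if len(chunk) <= limit:
            return chunk, None          # no further pages
        chunk = chunk[:limit]
        return chunk, sorted_rooms.index(chunk[-1])

    rooms = ["!a", "!b", "!c", "!d", "!e"]
    page1, tok = paginate(rooms, limit=2)             # (["!a", "!b"], 1)
    page2, tok = paginate(rooms, limit=2, token=tok)  # (["!c", "!d"], 3)
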
@defer.inlineCallbacks
+ def _generate_room_entry(self, room_id, num_joined_users, chunk, limit,
+ search_filter):
+ if limit and len(chunk) > limit + 1:
+ # We've already got enough, so let's just drop it.
+ return
+
+ result = {
+ "room_id": room_id,
+ "num_joined_members": num_joined_users,
+ }
+
+ current_state_ids = yield self.state_handler.get_current_state_ids(room_id)
+
+ event_map = yield self.store.get_events([
+ event_id for key, event_id in current_state_ids.items()
+ if key[0] in (
+ EventTypes.JoinRules,
+ EventTypes.Name,
+ EventTypes.Topic,
+ EventTypes.CanonicalAlias,
+ EventTypes.RoomHistoryVisibility,
+ EventTypes.GuestAccess,
+ "m.room.avatar",
+ )
+ ])
+
+ current_state = {
+ (ev.type, ev.state_key): ev
+ for ev in event_map.values()
+ }
+
+ # Double check that this is actually a public room.
+ join_rules_event = current_state.get((EventTypes.JoinRules, ""))
+ if join_rules_event:
+ join_rule = join_rules_event.content.get("join_rule", None)
+ if join_rule and join_rule != JoinRules.PUBLIC:
+ defer.returnValue(None)
+
+ aliases = yield self.store.get_aliases_for_room(room_id)
+ if aliases:
+ result["aliases"] = aliases
+
+ name_event = current_state.get((EventTypes.Name, ""))
+ if name_event:
+ name = name_event.content.get("name", None)
+ if name:
+ result["name"] = name
+
+ topic_event = current_state.get((EventTypes.Topic, ""))
+ if topic_event:
+ topic = topic_event.content.get("topic", None)
+ if topic:
+ result["topic"] = topic
+
+ canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
+ if canonical_event:
+ canonical_alias = canonical_event.content.get("alias", None)
+ if canonical_alias:
+ result["canonical_alias"] = canonical_alias
+
+ visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
+ visibility = None
+ if visibility_event:
+ visibility = visibility_event.content.get("history_visibility", None)
+ result["world_readable"] = visibility == "world_readable"
+
+ guest_event = current_state.get((EventTypes.GuestAccess, ""))
+ guest = None
+ if guest_event:
+ guest = guest_event.content.get("guest_access", None)
+ result["guest_can_join"] = guest == "can_join"
+
+ avatar_event = current_state.get(("m.room.avatar", ""))
+ if avatar_event:
+ avatar_url = avatar_event.content.get("url", None)
+ if avatar_url:
+ result["avatar_url"] = avatar_url
+
+ if _matches_room_entry(result, search_filter):
+ chunk.append(result)
+
+ @defer.inlineCallbacks
def get_remote_public_room_list(self, server_name, limit=None, since_token=None,
search_filter=None):
if search_filter:
@@ -361,12 +387,12 @@ class RoomListNextBatch(namedtuple("RoomListNextBatch", (
def _matches_room_entry(room_entry, search_filter):
if search_filter and search_filter.get("generic_search_term", None):
- generic_search_term = search_filter["generic_search_term"]
- if generic_search_term in room_entry.get("name", ""):
+ generic_search_term = search_filter["generic_search_term"].upper()
+ if generic_search_term in room_entry.get("name", "").upper():
return True
- elif generic_search_term in room_entry.get("topic", ""):
+ elif generic_search_term in room_entry.get("topic", "").upper():
return True
- elif generic_search_term in room_entry.get("canonical_alias", ""):
+ elif generic_search_term in room_entry.get("canonical_alias", "").upper():
return True
else:
return True
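
For reference, the matcher after this change reads roughly as below (a
sketch: the trailing `return False` fall-through is an assumption, since
the hunk does not show the end of the function). Upper-casing both sides
is what makes the substring test case-insensitive.

    def _matches_room_entry(room_entry, search_filter):
        if search_filter and search_filter.get("generic_search_term", None):
            term = search_filter["generic_search_term"].upper()
            for field in ("name", "topic", "canonical_alias"):
                if term in room_entry.get(field, "").upper():
                    return True
        else:
            # No search term given: everything matches.
            return True
        return False
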
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index edb00ed206..85effdfa46 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -263,6 +263,8 @@ BASE_APPEND_UNDERRIDE_RULES = [
}
]
},
+ # XXX: once m.direct is standardised everywhere, we should use it to detect
+ # a DM from the user's perspective rather than this heuristic.
{
'rule_id': 'global/underride/.m.rule.room_one_to_one',
'conditions': [
@@ -289,6 +291,34 @@ BASE_APPEND_UNDERRIDE_RULES = [
}
]
},
+ # XXX: this is going to fire for events which aren't m.room.message
+ # events but are encrypted (e.g. m.call.*)...
+ {
+ 'rule_id': 'global/underride/.m.rule.encrypted_room_one_to_one',
+ 'conditions': [
+ {
+ 'kind': 'room_member_count',
+ 'is': '2',
+ '_id': 'member_count',
+ },
+ {
+ 'kind': 'event_match',
+ 'key': 'type',
+ 'pattern': 'm.room.encrypted',
+ '_id': '_encrypted',
+ }
+ ],
+ 'actions': [
+ 'notify',
+ {
+ 'set_tweak': 'sound',
+ 'value': 'default'
+ }, {
+ 'set_tweak': 'highlight',
+ 'value': False
+ }
+ ]
+ },
{
'rule_id': 'global/underride/.m.rule.message',
'conditions': [
@@ -305,6 +335,25 @@ BASE_APPEND_UNDERRIDE_RULES = [
'value': False
}
]
+ },
+ # XXX: this is going to fire for events which aren't m.room.message
+ # events but are encrypted (e.g. m.call.*)...
+ {
+ 'rule_id': 'global/underride/.m.rule.encrypted',
+ 'conditions': [
+ {
+ 'kind': 'event_match',
+ 'key': 'type',
+ 'pattern': 'm.room.encrypted',
+ '_id': '_encrypted',
+ }
+ ],
+ 'actions': [
+ 'notify', {
+ 'set_tweak': 'highlight',
+ 'value': False
+ }
+ ]
}
]
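
A rough sketch of how the new underride rules behave (illustrative only:
Synapse's real evaluator also supports glob patterns in event_match and
comparison prefixes such as ">=" in room_member_count, which this toy
version omits). Every condition must hold before a rule's actions fire.

    def rule_matches(rule, event, room_member_count):
        for cond in rule["conditions"]:
            if cond["kind"] == "room_member_count":
                if str(room_member_count) != cond["is"]:
                    return False
            elif cond["kind"] == "event_match":
                if event.get(cond["key"]) != cond["pattern"]:
                    return False
        return True

So an m.room.encrypted event in a two-person room matches
.m.rule.encrypted_room_one_to_one and notifies with the default sound,
while in a larger room only .m.rule.encrypted matches, which notifies
without a sound tweak.
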
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 5584bfbfc0..45287bf05b 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -307,7 +307,7 @@ class PublicRoomListRestServlet(ClientV1RestServlet):
server = parse_string(request, "server", default=None)
try:
- yield self.auth.get_user_by_req(request)
+ yield self.auth.get_user_by_req(request, allow_guest=True)
except AuthError as e:
# We allow people to not be authed if they're just looking at our
# room list, but require auth when we proxy the request.
@@ -339,7 +339,7 @@ class PublicRoomListRestServlet(ClientV1RestServlet):
@defer.inlineCallbacks
def on_POST(self, request):
- yield self.auth.get_user_by_req(request)
+ yield self.auth.get_user_by_req(request, allow_guest=True)
server = parse_string(request, "server", default=None)
content = parse_json_object_from_request(request)
|