author    | Mark Haines <mark.haines@matrix.org> | 2015-12-01 19:15:27 +0000
committer | Mark Haines <mark.haines@matrix.org> | 2015-12-01 19:15:27 +0000
commit    | f73ea0bda26a51794abedb65cbf4d0a717dee26c (patch)
tree      | 64fd41675594a7c71a7f119fb68b5b476f9173fd /synapse/handlers/search.py
parent    | Only fire user_joined_room on the distributor if the user has actually joined... (diff)
parent    | Merge pull request #392 from matrix-org/markjh/client_config (diff)
download  | synapse-f73ea0bda26a51794abedb65cbf4d0a717dee26c.tar.xz
Merge branch 'develop' into markjh/edu_frequency
Diffstat (limited to 'synapse/handlers/search.py')
-rw-r--r-- | synapse/handlers/search.py | 129
1 file changed, 61 insertions, 68 deletions
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 6d2197339e..df6390cf05 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -131,6 +131,17 @@ class SearchHandler(BaseHandler):
         if batch_group == "room_id":
             room_ids.intersection_update({batch_group_key})
 
+        if not room_ids:
+            defer.returnValue({
+                "search_categories": {
+                    "room_events": {
+                        "results": {},
+                        "count": 0,
+                        "highlights": [],
+                    }
+                }
+            })
+
         rank_map = {}  # event_id -> rank of event
         allowed_events = []
         room_groups = {}  # Holds result of grouping by room, if applicable
@@ -178,85 +189,67 @@ class SearchHandler(BaseHandler):
                 s["results"].append(e.event_id)
 
         elif order_by == "recent":
-            # In this case we specifically loop through each room as the given
-            # limit applies to each room, rather than a global list.
-            # This is not necessarilly a good idea.
-            for room_id in room_ids:
-                room_events = []
-                if batch_group == "room_id" and batch_group_key == room_id:
-                    pagination_token = batch_token
-                else:
-                    pagination_token = None
-                i = 0
-
-                # We keep looping and we keep filtering until we reach the limit
-                # or we run out of things.
-                # But only go around 5 times since otherwise synapse will be sad.
-                while len(room_events) < search_filter.limit() and i < 5:
-                    i += 1
-                    search_result = yield self.store.search_room(
-                        room_id, search_term, keys, search_filter.limit() * 2,
-                        pagination_token=pagination_token,
-                    )
+            room_events = []
+            i = 0
+
+            pagination_token = batch_token
+
+            # We keep looping and we keep filtering until we reach the limit
+            # or we run out of things.
+            # But only go around 5 times since otherwise synapse will be sad.
+            while len(room_events) < search_filter.limit() and i < 5:
+                i += 1
+                search_result = yield self.store.search_rooms(
+                    room_ids, search_term, keys, search_filter.limit() * 2,
+                    pagination_token=pagination_token,
+                )
 
-                    if search_result["highlights"]:
-                        highlights.update(search_result["highlights"])
+                if search_result["highlights"]:
+                    highlights.update(search_result["highlights"])
 
-                    results = search_result["results"]
+                results = search_result["results"]
 
-                    results_map = {r["event"].event_id: r for r in results}
+                results_map = {r["event"].event_id: r for r in results}
 
-                    rank_map.update({r["event"].event_id: r["rank"] for r in results})
+                rank_map.update({r["event"].event_id: r["rank"] for r in results})
 
-                    filtered_events = search_filter.filter([
-                        r["event"] for r in results
-                    ])
+                filtered_events = search_filter.filter([
+                    r["event"] for r in results
+                ])
 
-                    events = yield self._filter_events_for_client(
-                        user.to_string(), filtered_events
-                    )
+                events = yield self._filter_events_for_client(
+                    user.to_string(), filtered_events
+                )
 
-                    room_events.extend(events)
-                    room_events = room_events[:search_filter.limit()]
+                room_events.extend(events)
+                room_events = room_events[:search_filter.limit()]
 
-                    if len(results) < search_filter.limit() * 2:
-                        pagination_token = None
-                        break
-                    else:
-                        pagination_token = results[-1]["pagination_token"]
-
-                if room_events:
-                    res = results_map[room_events[-1].event_id]
-                    pagination_token = res["pagination_token"]
-
-                group = room_groups.setdefault(room_id, {})
-                if pagination_token:
-                    next_batch = encode_base64("%s\n%s\n%s" % (
-                        "room_id", room_id, pagination_token
-                    ))
-                    group["next_batch"] = next_batch
-
-                    if batch_token:
-                        global_next_batch = next_batch
-
-                group["results"] = [e.event_id for e in room_events]
-                group["order"] = max(
-                    e.origin_server_ts/1000 for e in room_events
-                    if hasattr(e, "origin_server_ts")
-                )
+                if len(results) < search_filter.limit() * 2:
+                    pagination_token = None
+                    break
+                else:
+                    pagination_token = results[-1]["pagination_token"]
 
-                allowed_events.extend(room_events)
+            for event in room_events:
+                group = room_groups.setdefault(event.room_id, {
+                    "results": [],
+                })
+                group["results"].append(event.event_id)
 
-            # Normalize the group orders
-            if room_groups:
-                if len(room_groups) > 1:
-                    mx = max(g["order"] for g in room_groups.values())
-                    mn = min(g["order"] for g in room_groups.values())
+            if room_events and len(room_events) >= search_filter.limit():
+                last_event_id = room_events[-1].event_id
+                pagination_token = results_map[last_event_id]["pagination_token"]
 
-                    for g in room_groups.values():
-                        g["order"] = (g["order"] - mn) * 1.0 / (mx - mn)
-                else:
-                    room_groups.values()[0]["order"] = 1
+                global_next_batch = encode_base64("%s\n%s\n%s" % (
+                    "all", "", pagination_token
+                ))
+
+                for room_id, group in room_groups.items():
+                    group["next_batch"] = encode_base64("%s\n%s\n%s" % (
+                        "room_id", room_id, pagination_token
+                    ))
+
+            allowed_events.extend(room_events)
 
         else:
             # We should never get here due to the guard earlier.
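The new code path keeps the old over-fetch-and-filter loop but runs it once across all rooms: each round asks the store for twice the requested limit (visibility filtering may drop results), truncates to the limit, and gives up after five rounds so a pathological filter cannot loop indefinitely. A minimal standalone sketch of that pattern, with hypothetical fetch_page and is_visible callables standing in for store.search_rooms and _filter_events_for_client:

    def overfetch(fetch_page, is_visible, limit, max_rounds=5):
        """Collect up to `limit` visible results, over-fetching 2x per round."""
        collected = []
        token = None
        for _ in range(max_rounds):
            if len(collected) >= limit:
                break
            page = fetch_page(limit * 2, token)  # ask for twice the limit
            collected.extend(r for r in page["results"] if is_visible(r))
            collected = collected[:limit]  # never keep more than the limit
            if len(page["results"]) < limit * 2:
                token = None  # the store is exhausted; no further pages
                break
            token = page["results"][-1]["pagination_token"]
        return collected, token

Fetching 2x the limit per round is a heuristic trade-off: it bounds the number of store queries when filtering is aggressive, while keeping each query cheap when it is not.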
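The pagination tokens visible in the diff are base64-encoded newline-separated triples: the old per-room form is ("room_id", room_id, store_token), and the merged change adds a global form ("all", "", store_token) so a single next_batch can resume the search across every room. A sketch of that layout, assuming standard base64 (synapse's own encode_base64 may use an unpadded variant); decode_batch_token is a hypothetical inverse for illustration:

    import base64

    def encode_batch_token(group, key, pagination_token):
        # Mirrors the "%s\n%s\n%s" layout in the diff, e.g.
        # ("room_id", "!abc:example.org", token) or ("all", "", token).
        raw = "%s\n%s\n%s" % (group, key, pagination_token)
        return base64.b64encode(raw.encode("utf-8")).decode("ascii")

    def decode_batch_token(batch):
        # Hypothetical inverse: recover (group, key, token) from next_batch.
        raw = base64.b64decode(batch.encode("ascii")).decode("utf-8")
        group, key, token = raw.split("\n", 2)
        return group, key, token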