summary refs log tree commit diff
path: root/synapse
diff options
context:
space:
mode:
authorRichard van der Hoff <richard@matrix.org>2017-11-14 09:23:56 +0000
committerRichard van der Hoff <richard@matrix.org>2017-11-14 10:23:47 +0000
commit1fc66c7460b7e6c503dbeb6577fb0ba3cf7dfd83 (patch)
treebd6bc64260f134351bd74afe3de9442e9538296a /synapse
parentMerge pull request #2668 from turt2live/travis/whoami (diff)
downloadsynapse-1fc66c7460b7e6c503dbeb6577fb0ba3cf7dfd83.tar.xz
Add a load of logging to the room_list handler
So we can see what it gets up to.
Diffstat (limited to 'synapse')
-rw-r--r--synapse/handlers/room_list.py13
1 file changed, 13 insertions, 0 deletions
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index 2cf34e51cb..928ee38aea 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -154,6 +154,8 @@ class RoomListHandler(BaseHandler):
             # We want larger rooms to be first, hence negating num_joined_users
             rooms_to_order_value[room_id] = (-num_joined_users, room_id)
 
+        logger.info("Getting ordering for %i rooms since %s",
+                    len(room_ids), stream_token)
         yield concurrently_execute(get_order_for_room, room_ids, 10)
 
         sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
@@ -181,17 +183,25 @@ class RoomListHandler(BaseHandler):
                 rooms_to_scan = rooms_to_scan[:since_token.current_limit]
                 rooms_to_scan.reverse()
 
+        logger.info("After sorting and filtering, %i rooms remain",
+                    len(rooms_to_scan))
+
         # Actually generate the entries. _append_room_entry_to_chunk will append to
         # chunk but will stop if len(chunk) > limit
         chunk = []
         if limit and not search_filter:
             step = limit + 1
             for i in xrange(0, len(rooms_to_scan), step):
+                logger.info("Processing %i rooms for result", step)
                 # We iterate here because the vast majority of cases we'll stop
                # at first iteration, but occasionally _append_room_entry_to_chunk
                 # won't append to the chunk and so we need to loop again.
                 # We don't want to scan over the entire range either as that
                 # would potentially waste a lot of work.
+                #
+                # XXX why would that happen? _append_room_entry_to_chunk will
+                # only exclude rooms which don't match search_filter, but we
+                # know search_filter is None here.
                 yield concurrently_execute(
                     lambda r: self._append_room_entry_to_chunk(
                         r, rooms_to_num_joined[r],
@@ -199,9 +209,11 @@ class RoomListHandler(BaseHandler):
                     ),
                     rooms_to_scan[i:i + step], 10
                 )
+                logger.info("Now %i rooms in result", len(chunk))
                 if len(chunk) >= limit + 1:
                     break
         else:
+            logger.info("Processing %i rooms for result", len(rooms_to_scan))
             yield concurrently_execute(
                 lambda r: self._append_room_entry_to_chunk(
                     r, rooms_to_num_joined[r],
@@ -209,6 +221,7 @@ class RoomListHandler(BaseHandler):
                 ),
                 rooms_to_scan, 5
             )
+            logger.info("Now %i rooms in result", len(chunk))
 
         chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))