-rw-r--r-- | changelog.d/7673.feature                                |  2 |
-rw-r--r-- | synapse/push/bulk_push_rule_evaluator.py                | 13 |
-rw-r--r-- | synapse/storage/data_stores/main/event_push_actions.py | 27 |
3 files changed, 20 insertions, 22 deletions
diff --git a/changelog.d/7673.feature b/changelog.d/7673.feature
index 74e2059ade..ecc3ffd8d5 100644
--- a/changelog.d/7673.feature
+++ b/changelog.d/7673.feature
@@ -1 +1 @@
-Add a per-room counter for unread messages in responses to `/sync` requests.
+Add a per-room counter for unread messages in responses to `/sync` requests. Implements [MSC2625](https://github.com/matrix-org/matrix-doc/pull/2625).
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index f7c3db5828..3244d39c37 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -191,13 +191,12 @@ class BulkPushRuleEvaluator(object):
                 )
                 if matches:
                     actions = [x for x in rule["actions"] if x != "dont_notify"]
-                    if actions:
-                        if (
-                            "notify" in actions
-                            or "org.matrix.msc2625.mark_unread" in actions
-                        ):
-                            # Push rules say we should act on this event.
-                            actions_by_user[uid] = actions
+                    if (
+                        "notify" in actions
+                        or "org.matrix.msc2625.mark_unread" in actions
+                    ):
+                        # Push rules say we should act on this event.
+                        actions_by_user[uid] = actions
                     break
 
         # Mark in the DB staging area the push actions for users who should be
diff --git a/synapse/storage/data_stores/main/event_push_actions.py b/synapse/storage/data_stores/main/event_push_actions.py
index 688aef4d2f..4409e87913 100644
--- a/synapse/storage/data_stores/main/event_push_actions.py
+++ b/synapse/storage/data_stores/main/event_push_actions.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from typing import Dict, Tuple
 
 import attr
 from six import iteritems
@@ -857,11 +858,11 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
         # Calculate the new counts that should be upserted into event_push_summary
         sql = """
             SELECT user_id, room_id,
-                coalesce(old.%s, 0) + upd.%s,
+                coalesce(old.%s, 0) + upd.cnt,
                 upd.stream_ordering,
                 old.user_id
             FROM (
-                SELECT user_id, room_id, count(*) as %s,
+                SELECT user_id, room_id, count(*) as cnt,
                     max(stream_ordering) as stream_ordering
                 FROM event_push_actions
                 WHERE ? <= stream_ordering AND stream_ordering < ?
@@ -874,31 +875,29 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
 
         # First get the count of unread messages.
         txn.execute(
-            sql % ("unread_count", "unread_count", "unread_count", ""),
+            sql % ("unread_count", ""),
             (old_rotate_stream_ordering, rotate_to_stream_ordering),
         )
-        unread_rows = txn.fetchall()
-
-        # Then get the count of notifications.
-        txn.execute(
-            sql % ("notif_count", "notif_count", "notif_count", "AND notif = 1"),
-            (old_rotate_stream_ordering, rotate_to_stream_ordering),
-        )
-        notif_rows = txn.fetchall()
 
         # We need to merge both lists into a single object because we might not have the
         # same amount of rows in each of them. In this case we use a dict indexed on the
         # user ID and room ID to make it easier to populate.
-        summaries = {}
-        for row in unread_rows:
+        summaries = {}  # type: Dict[Tuple[str, str], EventPushSummary]
+        for row in txn:
             summaries[(row[0], row[1])] = EventPushSummary(
                 user_id=row[0], room_id=row[1], unread_count=row[2],
                 stream_ordering=row[3], old_user_id=row[4], notif_count=0,
             )
 
+        # Then get the count of notifications.
+        txn.execute(
+            sql % ("notif_count", "AND notif = 1"),
+            (old_rotate_stream_ordering, rotate_to_stream_ordering),
+        )
+
         # notif_rows is populated based on a subset of the query used to populate
         # unread_rows, so we can be sure that there will be no KeyError here.
-        for row in notif_rows:
+        for row in txn:
             summaries[(row[0], row[1])].notif_count = row[2]
 
         logger.info("Rotating notifications, handling %d rows", len(summaries))
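Reviewer note: the snippet below is a minimal, standalone sketch of the merge pattern the rotation code relies on. The same aggregate query is run twice (once unfiltered for unread counts, once restricted with `AND notif = 1`), and the second pass only touches keys the first pass created, so the dict lookup cannot raise `KeyError`. `merge_summary_rows` and this simplified `EventPushSummary` are illustrative stand-ins, not the actual Synapse definitions.

```python
from typing import Dict, List, Tuple

import attr


@attr.s(slots=True)
class EventPushSummary(object):
    """Simplified stand-in for the summary of push actions per (user, room)."""

    user_id = attr.ib()
    room_id = attr.ib()
    unread_count = attr.ib()
    stream_ordering = attr.ib()
    old_user_id = attr.ib()
    notif_count = attr.ib()


def merge_summary_rows(unread_rows, notif_rows):
    # type: (List[Tuple], List[Tuple]) -> Dict[Tuple[str, str], EventPushSummary]
    """Merge unfiltered (unread) rows with notif-only rows.

    notif_rows comes from the same query restricted with `AND notif = 1`,
    so every (user_id, room_id) key it contains was already created by the
    first loop and the lookup cannot raise KeyError.
    """
    summaries = {}  # type: Dict[Tuple[str, str], EventPushSummary]
    for row in unread_rows:
        summaries[(row[0], row[1])] = EventPushSummary(
            user_id=row[0],
            room_id=row[1],
            unread_count=row[2],
            stream_ordering=row[3],
            old_user_id=row[4],
            notif_count=0,
        )
    for row in notif_rows:
        # Only updates entries created above; never inserts new keys.
        summaries[(row[0], row[1])].notif_count = row[2]
    return summaries


if __name__ == "__main__":
    # Hypothetical rows: (user_id, room_id, count, stream_ordering, old_user_id)
    unread = [("@alice:example.com", "!room:example.com", 3, 42, None)]
    notif = [("@alice:example.com", "!room:example.com", 1)]
    print(merge_summary_rows(unread, notif))
```

In the patch itself, iterating over `txn` directly after each `execute()` achieves the same result without materialising the intermediate row lists via `fetchall()`.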