From 21f135ba763a583ecf9ba2714b5151f6b14b61fd Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 10 Dec 2015 16:26:08 +0000 Subject: Very first cut of calculating actions for events as they come in. Doesn't store them yet. Not very efficient. --- synapse/storage/registration.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 2e5eddd259..f230faa25e 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -291,6 +291,18 @@ class RegistrationStore(SQLBaseStore): defer.returnValue(ret['user_id']) defer.returnValue(None) + @defer.inlineCallbacks + def get_all_user_ids(self): + """Returns all user ids registered on this homeserver""" + return self.runInteraction( + "get_all_user_ids", + self._get_all_user_ids_txn + ) + + def _get_all_user_ids_txn(self, txn): + txn.execute("SELECT name from users") + return [r[0] for r in txn.fetchall()] + @defer.inlineCallbacks def count_all_users(self): """Counts all users registered on the homeserver.""" -- cgit 1.4.1 From aa667ee396c473f497b084655d47b2a9520a538a Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 10 Dec 2015 17:51:15 +0000 Subject: Save event actions to the db --- synapse/push/action_generator.py | 6 ++-- synapse/storage/__init__.py | 2 ++ synapse/storage/event_actions.py | 41 +++++++++++++++++++++++ synapse/storage/schema/delta/27/event_actions.sql | 25 ++++++++++++++ 4 files changed, 72 insertions(+), 2 deletions(-) create mode 100644 synapse/storage/event_actions.py create mode 100644 synapse/storage/schema/delta/27/event_actions.sql (limited to 'synapse/storage') diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index 508eeaed95..870c68a0ca 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -19,7 +19,6 @@ import push_rule_evaluator import logging - logger = logging.getLogger(__name__) @@ -42,6 +41,9 @@ class ActionGenerator: evaluator = yield push_rule_evaluator.\ evaluator_for_user_name_and_profile_tag( uid, None, event['room_id'], self.store - ) + ) actions = yield evaluator.actions_for_event(event) logger.info("actions for user %s: %s", uid, actions) + self.store.set_actions_for_event( + event['event_id'], uid, None, actions + ) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index c46b653f11..a112dd237f 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -33,6 +33,7 @@ from .pusher import PusherStore from .push_rule import PushRuleStore from .media_repository import MediaRepositoryStore from .rejections import RejectionsStore +from .event_actions import EventActionsStore from .state import StateStore from .signatures import SignatureStore @@ -75,6 +76,7 @@ class DataStore(RoomMemberStore, RoomStore, SearchStore, TagsStore, AccountDataStore, + EventActionsStore ): def __init__(self, hs): diff --git a/synapse/storage/event_actions.py b/synapse/storage/event_actions.py new file mode 100644 index 0000000000..593b1714c7 --- /dev/null +++ b/synapse/storage/event_actions.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2014 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from twisted.internet import defer
+
+import logging
+import simplejson as json
+
+logger = logging.getLogger(__name__)
+
+
+class EventActionsStore(SQLBaseStore):
+    @defer.inlineCallbacks
+    def set_actions_for_event(self, event_id, user_id, profile_tag, actions):
+        actionsJson = json.dumps(actions)
+
+        ret = yield self.runInteraction(
+            "_set_actions_for_event",
+            self._simple_upsert_txn,
+            EventActionsTable.table_name,
+            {'event_id': event_id, 'user_id': user_id, 'profile_tag': profile_tag},
+            {'actions': actionsJson}
+        )
+        defer.returnValue(ret)
+
+
+class EventActionsTable(object):
+    table_name = "event_actions"
diff --git a/synapse/storage/schema/delta/27/event_actions.sql b/synapse/storage/schema/delta/27/event_actions.sql
new file mode 100644
index 0000000000..1246823a00
--- /dev/null
+++ b/synapse/storage/schema/delta/27/event_actions.sql
@@ -0,0 +1,25 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS event_actions(
+    event_id TEXT NOT NULL,
+    user_id TEXT NOT NULL,
+    profile_tag VARCHAR(32),
+    actions TEXT NOT NULL,
+    CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (event_id, user_id, profile_tag)
+);
+
+
+CREATE INDEX event_actions_event_id_user_id_profile_tag on event_actions(event_id, user_id, profile_tag);
-- cgit 1.4.1

From 42ad49f5b75c2c645c4060026c21c5572f5b1063 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 16 Dec 2015 18:42:09 +0000
Subject: still very WIP, but now sends unread_notifications_count in the room object on sync (only actually correct in a full sync: hardcoded to 0 in incremental syncs).
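For illustration, here is the shape of the unread-count lookup this commit adds in get_unread_event_actions_by_room(), as a self-contained sketch run against an in-memory SQLite database. The table and column names follow the code and schema in the diff below, but the sqlite3 harness and sample rows are invented for the example; equality is written with standard SQL "=" (the "==" in the diff is a SQLite-ism that a later commit in this series corrects).

    import sqlite3

    db = sqlite3.connect(":memory:")
    db.executescript("""
        CREATE TABLE events(
            event_id TEXT, room_id TEXT,
            topological_ordering BIGINT, stream_ordering BIGINT);
        CREATE TABLE event_actions(
            room_id TEXT, event_id TEXT, user_id TEXT,
            profile_tag VARCHAR(32), actions TEXT);
    """)

    def get_unread_event_actions_by_room(txn, room_id, last_read_event_id):
        # Step 1: find where the read marker sits in the room's ordering.
        row = txn.execute(
            "SELECT stream_ordering, topological_ordering FROM events"
            " WHERE room_id = ? AND event_id = ?",
            (room_id, last_read_event_id),
        ).fetchone()
        if row is None:
            return []
        stream_ordering, topological_ordering = row
        # Step 2: fetch the stored actions for every event ordered strictly
        # after that point (topological first, stream as tie-break).
        return txn.execute(
            "SELECT ea.actions"
            " FROM event_actions ea, events e"
            " WHERE ea.room_id = e.room_id AND ea.event_id = e.event_id"
            " AND ea.room_id = ?"
            " AND (e.topological_ordering > ?"
            "  OR (e.topological_ordering = ? AND e.stream_ordering > ?))",
            (room_id, topological_ordering, topological_ordering, stream_ordering),
        ).fetchall()

    db.executemany("INSERT INTO events VALUES (?,?,?,?)", [
        ("$read", "!r", 1, 1), ("$a", "!r", 2, 2), ("$b", "!r", 2, 3),
    ])
    db.executemany("INSERT INTO event_actions VALUES (?,?,?,?,?)", [
        ("!r", "$read", "@u:hs", None, '["notify"]'),
        ("!r", "$a", "@u:hs", None, '["notify"]'),
        ("!r", "$b", "@u:hs", None, '["notify"]'),
    ])
    # The read event itself is excluded; the two later events count.
    assert len(get_unread_event_actions_by_room(db, "!r", "$read")) == 2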
--- synapse/handlers/sync.py | 26 +++++++++++ synapse/push/action_generator.py | 2 +- synapse/rest/client/v2_alpha/sync.py | 1 + synapse/storage/event_actions.py | 53 ++++++++++++++++++++++- synapse/storage/schema/delta/27/event_actions.sql | 5 ++- 5 files changed, 82 insertions(+), 5 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 24c2b2fad6..6d193a10c4 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -52,6 +52,7 @@ class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [ "state", # dict[(str, str), FrozenEvent] "ephemeral", "account_data", + "unread_notification_count", ])): __slots__ = [] @@ -64,6 +65,7 @@ class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [ or self.state or self.ephemeral or self.account_data + or self.unread_notification_count > 0 ) @@ -161,6 +163,18 @@ class SyncHandler(BaseHandler): else: return self.incremental_sync_with_gap(sync_config, since_token) + def last_read_event_id_for_room_and_user(self, room_id, user_id, ephemeral_by_room): + if room_id not in ephemeral_by_room: + return None + for e in ephemeral_by_room[room_id]: + if e['type'] != 'm.receipt': + continue + for receipt_event_id,val in e['content'].items(): + if 'm.read' in val: + if user_id in val['m.read']: + return receipt_event_id + return None + @defer.inlineCallbacks def full_state_sync(self, sync_config, timeline_since_token): """Get a sync for a client which is starting without any state. @@ -265,6 +279,16 @@ class SyncHandler(BaseHandler): room_id, sync_config, now_token, since_token=timeline_since_token ) + last_unread_event_id = self.last_read_event_id_for_room_and_user( + room_id, sync_config.user.to_string(), ephemeral_by_room + ) + + notifs = [] + if last_unread_event_id: + notifs = yield self.store.get_unread_event_actions_by_room( + room_id, last_unread_event_id + ) + current_state = yield self.get_state_at(room_id, now_token) defer.returnValue(JoinedSyncResult( @@ -275,6 +299,7 @@ class SyncHandler(BaseHandler): account_data=self.account_data_for_room( room_id, tags_by_room, account_data_by_room ), + unread_notification_count=len(notifs) )) def account_data_for_user(self, account_data): @@ -509,6 +534,7 @@ class SyncHandler(BaseHandler): account_data=self.account_data_for_room( room_id, tags_by_room, account_data_by_room ), + unread_notification_count=0 ) logger.debug("Result for room %s: %r", room_id, room_sync) diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index a72a7d703c..1c7cd31666 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -45,5 +45,5 @@ class ActionGenerator: logger.info("actions for user %s: %s", uid, actions) if len(actions): self.store.set_actions_for_event( - event['event_id'], uid, None, actions + event, uid, None, actions ) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index f0a637a6da..4ca10732c1 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -304,6 +304,7 @@ class SyncRestServlet(RestServlet): }, "state": {"events": serialized_state}, "account_data": {"events": account_data}, + "unread_notification_count": room.unread_notification_count } if joined: diff --git a/synapse/storage/event_actions.py b/synapse/storage/event_actions.py index 593b1714c7..40ac8e2d27 100644 --- a/synapse/storage/event_actions.py +++ b/synapse/storage/event_actions.py @@ -24,18 +24,67 @@ logger = 
logging.getLogger(__name__) class EventActionsStore(SQLBaseStore): @defer.inlineCallbacks - def set_actions_for_event(self, event_id, user_id, profile_tag, actions): + def set_actions_for_event(self, event, user_id, profile_tag, actions): actionsJson = json.dumps(actions) ret = yield self.runInteraction( "_set_actions_for_event", self._simple_upsert_txn, EventActionsTable.table_name, - {'event_id': event_id, 'user_id': user_id, 'profile_tag': profile_tag}, + { + 'room_id': event['room_id'], + 'event_id': event['event_id'], + 'user_id': user_id, + 'profile_tag': profile_tag + }, {'actions': actionsJson} ) defer.returnValue(ret) + @defer.inlineCallbacks + def get_unread_event_actions_by_room(self, room_id, last_read_event_id): + #events = yield self._get_events( + # [last_read_event_id], + # check_redacted=False + #) + + def _get_unread_event_actions_by_room(txn): + sql = ( + "SELECT stream_ordering, topological_ordering" + " FROM events" + " WHERE room_id = ? AND event_id = ?" + ) + txn.execute( + sql, (room_id, last_read_event_id) + ) + results = txn.fetchall() + if len(results) == 0: + return [] + + stream_ordering = results[0][0] + topological_ordering = results[0][1] + + sql = ( + "SELECT ea.actions" + " FROM event_actions ea, events e" + " WHERE ea.room_id = e.room_id" + " AND ea.event_id = e.event_id" + " AND ea.room_id = ?" + " AND (" + " e.topological_ordering > ?" + " OR (e.topological_ordering == ? AND e.stream_ordering > ?)" + ")" + ) + txn.execute(sql, + (room_id, topological_ordering, topological_ordering, stream_ordering) + ) + return txn.fetchall() + + ret = yield self.runInteraction( + "get_unread_event_actions_by_room", + _get_unread_event_actions_by_room + ) + defer.returnValue(ret) class EventActionsTable(object): table_name = "event_actions" diff --git a/synapse/storage/schema/delta/27/event_actions.sql b/synapse/storage/schema/delta/27/event_actions.sql index 1246823a00..bbdaee990e 100644 --- a/synapse/storage/schema/delta/27/event_actions.sql +++ b/synapse/storage/schema/delta/27/event_actions.sql @@ -14,12 +14,13 @@ */ CREATE TABLE IF NOT EXISTS event_actions( + room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, profile_tag VARCHAR(32), actions TEXT NOT NULL, - CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (event_id, user_id, profile_tag) + CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag) ); -CREATE INDEX event_actions_event_id_user_id_profile_tag on event_actions(event_id, user_id, profile_tag); +CREATE INDEX event_actions_room_id_event_id_user_id_profile_tag on event_actions(room_id, event_id, user_id, profile_tag); -- cgit 1.4.1 From 413d0d6a2404c579b1fa39ece9a698f9df8349db Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 18 Dec 2015 17:47:00 +0000 Subject: Make unread notification count sending work: put the correct count in incremental syncs too, where necessary, and fix silly bugs like only select the event actions for that user... 
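For illustration, here is the read-receipt scan the per-room count hangs off, mirroring last_read_event_id_for_room_and_user() from the previous commit as a free-standing function. The sample m.receipt event is invented, but its shape follows the Matrix client-server API: receipt content is keyed by the event id being acknowledged, each entry carrying an "m.read" map of user ids.

    def last_read_event_id(room_id, user_id, ephemeral_by_room):
        for ev in ephemeral_by_room.get(room_id, []):
            if ev['type'] != 'm.receipt':
                continue
            for receipt_event_id, val in ev['content'].items():
                if user_id in val.get('m.read', {}):
                    return receipt_event_id
        return None

    ephemeral_by_room = {
        "!room:hs": [{
            "type": "m.receipt",
            "content": {
                "$ev1:hs": {"m.read": {"@alice:hs": {"ts": 1450000000000}}},
            },
        }],
    }
    assert last_read_event_id("!room:hs", "@alice:hs", ephemeral_by_room) == "$ev1:hs"
    # No receipt for this user in the window means "no new information",
    # so the sync code below returns None and the client keeps whatever
    # notification count it had last time.
    assert last_read_event_id("!room:hs", "@bob:hs", ephemeral_by_room) is None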
--- synapse/handlers/sync.py | 48 +++++++++++++++++++++++++++++++--------- synapse/storage/event_actions.py | 21 ++++++++++-------- 2 files changed, 49 insertions(+), 20 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 6d193a10c4..44420a063a 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -65,7 +65,8 @@ class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [ or self.state or self.ephemeral or self.account_data - or self.unread_notification_count > 0 + # nb the notification count does not, er, count: if there's nothing + # else in the result, we don't need to send it. ) @@ -279,15 +280,12 @@ class SyncHandler(BaseHandler): room_id, sync_config, now_token, since_token=timeline_since_token ) - last_unread_event_id = self.last_read_event_id_for_room_and_user( - room_id, sync_config.user.to_string(), ephemeral_by_room + notifs = yield self.unread_notifs_for_room_id( + room_id, sync_config, ephemeral_by_room ) - - notifs = [] - if last_unread_event_id: - notifs = yield self.store.get_unread_event_actions_by_room( - room_id, last_unread_event_id - ) + notif_count = None + if notifs is not None: + notif_count = len(notifs) current_state = yield self.get_state_at(room_id, now_token) @@ -299,7 +297,7 @@ class SyncHandler(BaseHandler): account_data=self.account_data_for_room( room_id, tags_by_room, account_data_by_room ), - unread_notification_count=len(notifs) + unread_notification_count=notif_count )) def account_data_for_user(self, account_data): @@ -441,6 +439,10 @@ class SyncHandler(BaseHandler): ) now_token = now_token.copy_and_replace("presence_key", presence_key) + _, all_ephemeral_by_room = yield self.ephemeral_by_room( + sync_config, now_token + ) + now_token, ephemeral_by_room = yield self.ephemeral_by_room( sync_config, now_token, since_token ) @@ -514,6 +516,13 @@ class SyncHandler(BaseHandler): else: prev_batch = now_token + notifs = yield self.unread_notifs_for_room_id( + room_id, sync_config, all_ephemeral_by_room + ) + notif_count = None + if notifs is not None: + notif_count = len(notifs) + just_joined = yield self.check_joined_room(sync_config, state) if just_joined: logger.debug("User has just joined %s: needs full state", @@ -534,7 +543,7 @@ class SyncHandler(BaseHandler): account_data=self.account_data_for_room( room_id, tags_by_room, account_data_by_room ), - unread_notification_count=0 + unread_notification_count=notif_count ) logger.debug("Result for room %s: %r", room_id, room_sync) @@ -805,3 +814,20 @@ class SyncHandler(BaseHandler): if join_event.content["membership"] == Membership.JOIN: return True return False + + @defer.inlineCallbacks + def unread_notifs_for_room_id(self, room_id, sync_config, ephemeral_by_room): + last_unread_event_id = self.last_read_event_id_for_room_and_user( + room_id, sync_config.user.to_string(), ephemeral_by_room + ) + + notifs = [] + if last_unread_event_id: + notifs = yield self.store.get_unread_event_actions_by_room_for_user( + room_id, sync_config.user.to_string(), last_unread_event_id + ) + else: + # There is no new information in this period, so your notification + # count is whatever it was last time. 
+ defer.returnValue(None) + defer.returnValue(notifs) \ No newline at end of file diff --git a/synapse/storage/event_actions.py b/synapse/storage/event_actions.py index 40ac8e2d27..f7fe78e554 100644 --- a/synapse/storage/event_actions.py +++ b/synapse/storage/event_actions.py @@ -42,12 +42,9 @@ class EventActionsStore(SQLBaseStore): defer.returnValue(ret) @defer.inlineCallbacks - def get_unread_event_actions_by_room(self, room_id, last_read_event_id): - #events = yield self._get_events( - # [last_read_event_id], - # check_redacted=False - #) - + def get_unread_event_actions_by_room_for_user( + self, room_id, user_id, last_read_event_id + ): def _get_unread_event_actions_by_room(txn): sql = ( "SELECT stream_ordering, topological_ordering" @@ -65,10 +62,11 @@ class EventActionsStore(SQLBaseStore): topological_ordering = results[0][1] sql = ( - "SELECT ea.actions" + "SELECT ea.event_id, ea.actions" " FROM event_actions ea, events e" " WHERE ea.room_id = e.room_id" " AND ea.event_id = e.event_id" + " AND ea.user_id = ?" " AND ea.room_id = ?" " AND (" " e.topological_ordering > ?" @@ -76,9 +74,14 @@ class EventActionsStore(SQLBaseStore): ")" ) txn.execute(sql, - (room_id, topological_ordering, topological_ordering, stream_ordering) + ( + user_id, room_id, + topological_ordering, topological_ordering, stream_ordering + ) ) - return txn.fetchall() + return [ + { "event_id": row[0], "actions": row[1] } for row in txn.fetchall() + ] ret = yield self.runInteraction( "get_unread_event_actions_by_room", -- cgit 1.4.1 From 091c545c4fb38f662b61cb46779a813f70971e4f Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 21 Dec 2015 10:14:57 +0000 Subject: pep8 --- synapse/handlers/sync.py | 6 +++--- synapse/storage/event_actions.py | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 44420a063a..20b2a2595a 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -170,7 +170,7 @@ class SyncHandler(BaseHandler): for e in ephemeral_by_room[room_id]: if e['type'] != 'm.receipt': continue - for receipt_event_id,val in e['content'].items(): + for receipt_event_id, val in e['content'].items(): if 'm.read' in val: if user_id in val['m.read']: return receipt_event_id @@ -281,7 +281,7 @@ class SyncHandler(BaseHandler): ) notifs = yield self.unread_notifs_for_room_id( - room_id, sync_config, ephemeral_by_room + room_id, sync_config, ephemeral_by_room ) notif_count = None if notifs is not None: @@ -830,4 +830,4 @@ class SyncHandler(BaseHandler): # There is no new information in this period, so your notification # count is whatever it was last time. defer.returnValue(None) - defer.returnValue(notifs) \ No newline at end of file + defer.returnValue(notifs) diff --git a/synapse/storage/event_actions.py b/synapse/storage/event_actions.py index f7fe78e554..fbd0a42279 100644 --- a/synapse/storage/event_actions.py +++ b/synapse/storage/event_actions.py @@ -73,14 +73,13 @@ class EventActionsStore(SQLBaseStore): " OR (e.topological_ordering == ? 
AND e.stream_ordering > ?)" ")" ) - txn.execute(sql, - ( - user_id, room_id, - topological_ordering, topological_ordering, stream_ordering - ) + txn.execute(sql, ( + user_id, room_id, + topological_ordering, topological_ordering, stream_ordering + ) ) return [ - { "event_id": row[0], "actions": row[1] } for row in txn.fetchall() + {"event_id": row[0], "actions": row[1]} for row in txn.fetchall() ] ret = yield self.runInteraction( @@ -89,5 +88,6 @@ class EventActionsStore(SQLBaseStore): ) defer.returnValue(ret) + class EventActionsTable(object): table_name = "event_actions" -- cgit 1.4.1 From 65c451cb3878fb41f28a2adecd638894e18f5343 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 22 Dec 2015 15:19:34 +0000 Subject: Add bulk push rule evaluator which actually still evaluates rules one by one, but does far fewer db queries to fetch the rules --- synapse/push/action_generator.py | 28 ++++----- synapse/push/bulk_push_rule_evaluator.py | 99 ++++++++++++++++++++++++++++++++ synapse/push/push_rule_evaluator.py | 13 +++-- synapse/storage/push_rule.py | 41 +++++++++++++ 4 files changed, 159 insertions(+), 22 deletions(-) create mode 100644 synapse/push/bulk_push_rule_evaluator.py (limited to 'synapse/storage') diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index 6e107ca792..2ad5f82da2 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -15,9 +15,7 @@ from twisted.internet import defer -from synapse.types import UserID - -import push_rule_evaluator +import bulk_push_rule_evaluator import logging @@ -39,17 +37,13 @@ class ActionGenerator: def handle_event(self, event): users = yield self.store.get_users_in_room(event['room_id']) - for uid in users: - if not self.hs.is_mine(UserID.from_string(uid)): - continue - - evaluator = yield push_rule_evaluator.\ - evaluator_for_user_name_and_profile_tag( - uid, None, event['room_id'], self.store - ) - actions = yield evaluator.actions_for_event(event) - logger.info("actions for user %s: %s", uid, actions) - if len(actions): - self.store.set_actions_for_event( - event, uid, None, actions - ) + bulk_evaluator = yield bulk_push_rule_evaluator.evaluator_for_room_id( + event['room_id'], self.hs, self.store + ) + + actions_by_user = bulk_evaluator.action_for_event_by_user(event) + + for uid,actions in actions_by_user.items(): + self.store.set_actions_for_event( + event, uid, None, actions + ) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py new file mode 100644 index 0000000000..f531d2edc4 --- /dev/null +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import simplejson as json + +from twisted.internet import defer + +from synapse.types import UserID + +import baserules +from push_rule_evaluator import PushRuleEvaluator + +logger = logging.getLogger(__name__) + + +def decode_rule_json(rule): + rule['conditions'] = json.loads(rule['conditions']) + rule['actions'] = json.loads(rule['actions']) + return rule + + +@defer.inlineCallbacks +def evaluator_for_room_id(room_id, hs, store): + users = yield store.get_users_in_room(room_id) + rules_by_user = yield store.bulk_get_push_rules(users) + rules_by_user = { + uid: baserules.list_with_base_rules( + [decode_rule_json(rule_list) for rule_list in rules_by_user[uid]] + if uid in rules_by_user else [], + UserID.from_string(uid) + ) + for uid in users + } + member_events = yield store.get_current_state( + room_id=room_id, + event_type='m.room.member', + ) + display_names = {} + for ev in member_events: + if ev.content.get("displayname"): + display_names[ev.state_key] = ev.content.get("displayname") + + defer.returnValue(BulkPushRuleEvaluator( + room_id, rules_by_user, display_names, users + )) + + +class BulkPushRuleEvaluator: + def __init__(self, room_id, rules_by_user, display_names, users_in_room): + self.room_id = room_id + self.rules_by_user = rules_by_user + self.display_names = display_names + self.users_in_room = users_in_room + + def action_for_event_by_user(self, event): + actions_by_user = {} + + for uid, rules in self.rules_by_user.items(): + display_name = None + if uid in self.display_names: + display_name = self.display_names[uid] + + for rule in rules: + if 'enabled' in rule and not rule['enabled']: + continue + + # XXX: profile tags + if BulkPushRuleEvaluator.event_matches_rule( + event, rule, + display_name, len(self.users_in_room), None + ): + actions = [x for x in rule['actions'] if x != 'dont_notify'] + if len(actions) > 0: + actions_by_user[uid] = actions + break + return actions_by_user + + @staticmethod + def event_matches_rule(event, rule, + display_name, room_member_count, profile_tag): + matches = True + for cond in rule['conditions']: + matches &= PushRuleEvaluator._event_fulfills_condition( + event, cond, display_name, room_member_count, profile_tag + ) + return matches \ No newline at end of file diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 420476fd0b..40c7622ec4 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -113,7 +113,8 @@ class PushRuleEvaluator: for c in conditions: matches &= self._event_fulfills_condition( ev, c, display_name=my_display_name, - room_member_count=room_member_count + room_member_count=room_member_count, + profile_tag=self.profile_tag ) logger.debug( "Rule %s %s", @@ -156,16 +157,18 @@ class PushRuleEvaluator: re.sub(r'\\\-', '-', x.group(2)))), r) return r - def _event_fulfills_condition(self, ev, condition, display_name, room_member_count): + @staticmethod + def _event_fulfills_condition(ev, condition, + display_name, room_member_count, profile_tag): if condition['kind'] == 'event_match': if 'pattern' not in condition: logger.warn("event_match condition with no pattern") return False # XXX: optimisation: cache our pattern regexps if condition['key'] == 'content.body': - r = r'\b%s\b' % self._glob_to_regexp(condition['pattern']) + r = r'\b%s\b' % PushRuleEvaluator._glob_to_regexp(condition['pattern']) else: - r = r'^%s$' % self._glob_to_regexp(condition['pattern']) + r = r'^%s$' % PushRuleEvaluator._glob_to_regexp(condition['pattern']) 
val = _value_for_dotted_key(condition['key'], ev) if val is None: return False @@ -174,7 +177,7 @@ class PushRuleEvaluator: elif condition['kind'] == 'device': if 'profile_tag' not in condition: return True - return condition['profile_tag'] == self.profile_tag + return condition['profile_tag'] == profile_tag elif condition['kind'] == 'contains_display_name': # This is special because display names can be different diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 5305b7e122..9dec4aa685 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -55,6 +55,47 @@ class PushRuleStore(SQLBaseStore): r['rule_id']: False if r['enabled'] == 0 else True for r in results }) + @defer.inlineCallbacks + def bulk_get_push_rules(self, user_ids): + batch_size = 100 + + def f(txn, user_ids_to_fetch): + sql = ( + "SELECT " + + ",".join(map(lambda x: "pr."+x, PushRuleTable.fields)) + + " FROM " + PushRuleTable.table_name + " pr " + + " LEFT JOIN " + PushRuleEnableTable.table_name + " pre " + + " ON pr.user_name = pre.user_name and pr.rule_id = pre.rule_id " + + " WHERE pr.user_name " + + " IN (" + ",".join(["?" for _ in user_ids_to_fetch]) + ")" + " AND (pre.enabled is null or pre.enabled = 1)" + " ORDER BY pr.user_name, pr.priority_class DESC, pr.priority DESC" + ) + txn.execute(sql, user_ids_to_fetch) + return txn.fetchall() + + results = {} + + batch_start = 0 + while batch_start < len(user_ids): + batch_end = max(len(user_ids), batch_size) + batch_user_ids = user_ids[batch_start:batch_end] + batch_start = batch_end + + rows = yield self.runInteraction( + "bulk_get_push_rules", f, batch_user_ids + ) + + for r in rows: + rawdict = { + PushRuleTable.fields[i]: r[i] for i in range(len(r)) + } + + if rawdict['user_name'] not in results: + results[rawdict['user_name']] = [] + results[rawdict['user_name']].append(rawdict) + defer.returnValue(results) + @defer.inlineCallbacks def add_push_rule(self, before, after, **kwargs): vals = kwargs -- cgit 1.4.1 From 4c8f6a7e427cc0e22ff1a19c3f1d9da0f9438f18 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 22 Dec 2015 17:04:31 +0000 Subject: Insert push actions in a single db query rather than one per user/profile_tag --- synapse/push/action_generator.py | 10 ++++++---- synapse/storage/event_actions.py | 31 ++++++++++++++++++------------- 2 files changed, 24 insertions(+), 17 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index 2ad5f82da2..148b1bda8e 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -43,7 +43,9 @@ class ActionGenerator: actions_by_user = bulk_evaluator.action_for_event_by_user(event) - for uid,actions in actions_by_user.items(): - self.store.set_actions_for_event( - event, uid, None, actions - ) + yield self.store.set_actions_for_event_and_users( + event, + [ + (uid, None, actions) for uid, actions in actions_by_user.items() + ] + ) diff --git a/synapse/storage/event_actions.py b/synapse/storage/event_actions.py index fbd0a42279..3efa445c18 100644 --- a/synapse/storage/event_actions.py +++ b/synapse/storage/event_actions.py @@ -24,22 +24,27 @@ logger = logging.getLogger(__name__) class EventActionsStore(SQLBaseStore): @defer.inlineCallbacks - def set_actions_for_event(self, event, user_id, profile_tag, actions): - actionsJson = json.dumps(actions) - - ret = yield self.runInteraction( - "_set_actions_for_event", - self._simple_upsert_txn, - EventActionsTable.table_name, - { + def 
set_actions_for_event_and_users(self, event, tuples): + """ + :param event: the event set actions for + :param tuples: list of tuples of (user_id, profile_tag, actions) + """ + values = [] + for uid, profile_tag, actions in tuples: + values.append({ 'room_id': event['room_id'], 'event_id': event['event_id'], - 'user_id': user_id, - 'profile_tag': profile_tag - }, - {'actions': actionsJson} + 'user_id': uid, + 'profile_tag': profile_tag, + 'actions': json.dumps(actions) + }) + + yield self.runInteraction( + "set_actions_for_event_and_users", + self._simple_insert_many_txn, + EventActionsTable.table_name, + values ) - defer.returnValue(ret) @defer.inlineCallbacks def get_unread_event_actions_by_room_for_user( -- cgit 1.4.1 From 3051c9d002a467643d1ab32bc36974d2e3f84c12 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 4 Jan 2016 13:39:29 +0000 Subject: Address minor PR issues --- synapse/handlers/_base.py | 4 ++-- synapse/handlers/federation.py | 4 ++-- synapse/push/action_generator.py | 7 +++---- synapse/push/bulk_push_rule_evaluator.py | 2 +- synapse/storage/event_actions.py | 2 +- synapse/storage/push_rule.py | 6 +++--- synapse/storage/registration.py | 12 ------------ 7 files changed, 12 insertions(+), 25 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 24c4c62698..938eb29de7 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -267,8 +267,8 @@ class BaseHandler(object): event, context=context ) - action_generator = ActionGenerator(self.hs, self.store) - yield action_generator.handle_event(serialize_event( + action_generator = ActionGenerator(self.store) + yield action_generator.handle_push_actions_for_event(serialize_event( event, self.clock.time_msec() )) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0b1221deb5..764709b424 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -245,8 +245,8 @@ class FederationHandler(BaseHandler): yield user_joined_room(self.distributor, user, event.room_id) if not backfilled and not event.internal_metadata.is_outlier(): - action_generator = ActionGenerator(self.hs, self.store) - yield action_generator.handle_event(serialize_event( + action_generator = ActionGenerator(self.store) + yield action_generator.handle_push_actions_for_event(serialize_event( event, self.clock.time_msec()) ) diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index 00f518f609..4ab5d9e1b8 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -23,8 +23,7 @@ logger = logging.getLogger(__name__) class ActionGenerator: - def __init__(self, hs, store): - self.hs = hs + def __init__(self, store): self.store = store # really we want to get all user ids and all profile tags too, # since we want the actions for each profile tag for every user and @@ -34,9 +33,9 @@ class ActionGenerator: # tag (ie. we just need all the users). 
@defer.inlineCallbacks - def handle_event(self, event): + def handle_push_actions_for_event(self, event): bulk_evaluator = yield bulk_push_rule_evaluator.evaluator_for_room_id( - event['room_id'], self.hs, self.store + event['room_id'], self.store ) actions_by_user = bulk_evaluator.action_for_event_by_user(event) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index c489bfc8d4..1c4e54ba44 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -33,7 +33,7 @@ def decode_rule_json(rule): @defer.inlineCallbacks -def evaluator_for_room_id(room_id, hs, store): +def evaluator_for_room_id(room_id, store): users = yield store.get_users_in_room(room_id) rules_by_user = yield store.bulk_get_push_rules(users) rules_by_user = { diff --git a/synapse/storage/event_actions.py b/synapse/storage/event_actions.py index 3efa445c18..fa9cbe71ee 100644 --- a/synapse/storage/event_actions.py +++ b/synapse/storage/event_actions.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2014 OpenMarket Ltd +# Copyright 2015 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 9dec4aa685..7c5123d644 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -62,12 +62,12 @@ class PushRuleStore(SQLBaseStore): def f(txn, user_ids_to_fetch): sql = ( "SELECT " + - ",".join(map(lambda x: "pr."+x, PushRuleTable.fields)) + + ",".join("pr."+x for x in PushRuleTable.fields) + " FROM " + PushRuleTable.table_name + " pr " + " LEFT JOIN " + PushRuleEnableTable.table_name + " pre " + " ON pr.user_name = pre.user_name and pr.rule_id = pre.rule_id " + " WHERE pr.user_name " + - " IN (" + ",".join(["?" for _ in user_ids_to_fetch]) + ")" + " IN (" + ",".join("?" 
for _ in user_ids_to_fetch) + ")" " AND (pre.enabled is null or pre.enabled = 1)" " ORDER BY pr.user_name, pr.priority_class DESC, pr.priority DESC" ) @@ -78,7 +78,7 @@ class PushRuleStore(SQLBaseStore): batch_start = 0 while batch_start < len(user_ids): - batch_end = max(len(user_ids), batch_size) + batch_end = min(len(user_ids), batch_size) batch_user_ids = user_ids[batch_start:batch_end] batch_start = batch_end diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 4676f225b9..09a05b08ef 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -291,18 +291,6 @@ class RegistrationStore(SQLBaseStore): defer.returnValue(ret['user_id']) defer.returnValue(None) - @defer.inlineCallbacks - def get_all_user_ids(self): - """Returns all user ids registered on this homeserver""" - return self.runInteraction( - "get_all_user_ids", - self._get_all_user_ids_txn - ) - - def _get_all_user_ids_txn(self, txn): - txn.execute("SELECT name from users") - return [r[0] for r in txn.fetchall()] - @defer.inlineCallbacks def count_all_users(self): """Counts all users registered on the homeserver.""" -- cgit 1.4.1 From c914d67cda9682331639b78190db367974e4fb8b Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 4 Jan 2016 14:05:37 +0000 Subject: Rename event-actions to event_push_actions as per PR request --- synapse/handlers/sync.py | 2 +- synapse/push/__init__.py | 2 +- synapse/push/action_generator.py | 2 +- synapse/storage/__init__.py | 4 +- synapse/storage/event_actions.py | 98 ---------------------- synapse/storage/event_push_actions.py | 98 ++++++++++++++++++++++ synapse/storage/schema/delta/27/event_actions.sql | 26 ------ .../storage/schema/delta/27/event_push_actions.sql | 26 ++++++ 8 files changed, 129 insertions(+), 129 deletions(-) delete mode 100644 synapse/storage/event_actions.py create mode 100644 synapse/storage/event_push_actions.py delete mode 100644 synapse/storage/schema/delta/27/event_actions.sql create mode 100644 synapse/storage/schema/delta/27/event_push_actions.sql (limited to 'synapse/storage') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index f63c073a20..64556c5eb8 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -896,7 +896,7 @@ class SyncHandler(BaseHandler): notifs = [] if last_unread_event_id: - notifs = yield self.store.get_unread_event_actions_by_room_for_user( + notifs = yield self.store.get_unread_event_push_actions_by_room_for_user( room_id, sync_config.user.to_string(), last_unread_event_id ) else: diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 250f22a168..3ab6da0625 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -27,7 +27,7 @@ import random logger = logging.getLogger(__name__) -# Pushers could now be moved to pull out of the event_actions table instead +# Pushers could now be moved to pull out of the event_push_actions table instead # of listening on the event stream: this would avoid them having to run the # rules again. 
class Pusher(object): diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index 4ab5d9e1b8..5526324a6d 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -40,7 +40,7 @@ class ActionGenerator: actions_by_user = bulk_evaluator.action_for_event_by_user(event) - yield self.store.set_actions_for_event_and_users( + yield self.store.set_push_actions_for_event_and_users( event, [ (uid, None, actions) for uid, actions in actions_by_user.items() diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index a112dd237f..43e05f144a 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -33,7 +33,7 @@ from .pusher import PusherStore from .push_rule import PushRuleStore from .media_repository import MediaRepositoryStore from .rejections import RejectionsStore -from .event_actions import EventActionsStore +from .event_push_actions import EventPushActionsStore from .state import StateStore from .signatures import SignatureStore @@ -76,7 +76,7 @@ class DataStore(RoomMemberStore, RoomStore, SearchStore, TagsStore, AccountDataStore, - EventActionsStore + EventPushActionsStore ): def __init__(self, hs): diff --git a/synapse/storage/event_actions.py b/synapse/storage/event_actions.py deleted file mode 100644 index fa9cbe71ee..0000000000 --- a/synapse/storage/event_actions.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2015 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ._base import SQLBaseStore -from twisted.internet import defer - -import logging -import simplejson as json - -logger = logging.getLogger(__name__) - - -class EventActionsStore(SQLBaseStore): - @defer.inlineCallbacks - def set_actions_for_event_and_users(self, event, tuples): - """ - :param event: the event set actions for - :param tuples: list of tuples of (user_id, profile_tag, actions) - """ - values = [] - for uid, profile_tag, actions in tuples: - values.append({ - 'room_id': event['room_id'], - 'event_id': event['event_id'], - 'user_id': uid, - 'profile_tag': profile_tag, - 'actions': json.dumps(actions) - }) - - yield self.runInteraction( - "set_actions_for_event_and_users", - self._simple_insert_many_txn, - EventActionsTable.table_name, - values - ) - - @defer.inlineCallbacks - def get_unread_event_actions_by_room_for_user( - self, room_id, user_id, last_read_event_id - ): - def _get_unread_event_actions_by_room(txn): - sql = ( - "SELECT stream_ordering, topological_ordering" - " FROM events" - " WHERE room_id = ? AND event_id = ?" - ) - txn.execute( - sql, (room_id, last_read_event_id) - ) - results = txn.fetchall() - if len(results) == 0: - return [] - - stream_ordering = results[0][0] - topological_ordering = results[0][1] - - sql = ( - "SELECT ea.event_id, ea.actions" - " FROM event_actions ea, events e" - " WHERE ea.room_id = e.room_id" - " AND ea.event_id = e.event_id" - " AND ea.user_id = ?" - " AND ea.room_id = ?" - " AND (" - " e.topological_ordering > ?" 
- " OR (e.topological_ordering == ? AND e.stream_ordering > ?)" - ")" - ) - txn.execute(sql, ( - user_id, room_id, - topological_ordering, topological_ordering, stream_ordering - ) - ) - return [ - {"event_id": row[0], "actions": row[1]} for row in txn.fetchall() - ] - - ret = yield self.runInteraction( - "get_unread_event_actions_by_room", - _get_unread_event_actions_by_room - ) - defer.returnValue(ret) - - -class EventActionsTable(object): - table_name = "event_actions" diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py new file mode 100644 index 0000000000..016c0adf8a --- /dev/null +++ b/synapse/storage/event_push_actions.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import SQLBaseStore +from twisted.internet import defer + +import logging +import simplejson as json + +logger = logging.getLogger(__name__) + + +class EventPushActionsStore(SQLBaseStore): + @defer.inlineCallbacks + def set_push_actions_for_event_and_users(self, event, tuples): + """ + :param event: the event set actions for + :param tuples: list of tuples of (user_id, profile_tag, actions) + """ + values = [] + for uid, profile_tag, actions in tuples: + values.append({ + 'room_id': event['room_id'], + 'event_id': event['event_id'], + 'user_id': uid, + 'profile_tag': profile_tag, + 'actions': json.dumps(actions) + }) + + yield self.runInteraction( + "set_actions_for_event_and_users", + self._simple_insert_many_txn, + EventPushActionsTable.table_name, + values + ) + + @defer.inlineCallbacks + def get_unread_event_push_actions_by_room_for_user( + self, room_id, user_id, last_read_event_id + ): + def _get_unread_event_push_actions_by_room(txn): + sql = ( + "SELECT stream_ordering, topological_ordering" + " FROM events" + " WHERE room_id = ? AND event_id = ?" + ) + txn.execute( + sql, (room_id, last_read_event_id) + ) + results = txn.fetchall() + if len(results) == 0: + return [] + + stream_ordering = results[0][0] + topological_ordering = results[0][1] + + sql = ( + "SELECT ea.event_id, ea.actions" + " FROM event_push_actions ea, events e" + " WHERE ea.room_id = e.room_id" + " AND ea.event_id = e.event_id" + " AND ea.user_id = ?" + " AND ea.room_id = ?" + " AND (" + " e.topological_ordering > ?" + " OR (e.topological_ordering == ? 
AND e.stream_ordering > ?)" + ")" + ) + txn.execute(sql, ( + user_id, room_id, + topological_ordering, topological_ordering, stream_ordering + ) + ) + return [ + {"event_id": row[0], "actions": row[1]} for row in txn.fetchall() + ] + + ret = yield self.runInteraction( + "get_unread_event_push_actions_by_room", + _get_unread_event_push_actions_by_room + ) + defer.returnValue(ret) + + +class EventPushActionsTable(object): + table_name = "event_push_actions" diff --git a/synapse/storage/schema/delta/27/event_actions.sql b/synapse/storage/schema/delta/27/event_actions.sql deleted file mode 100644 index bbdaee990e..0000000000 --- a/synapse/storage/schema/delta/27/event_actions.sql +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS event_actions( - room_id TEXT NOT NULL, - event_id TEXT NOT NULL, - user_id TEXT NOT NULL, - profile_tag VARCHAR(32), - actions TEXT NOT NULL, - CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag) -); - - -CREATE INDEX event_actions_room_id_event_id_user_id_profile_tag on event_actions(room_id, event_id, user_id, profile_tag); diff --git a/synapse/storage/schema/delta/27/event_push_actions.sql b/synapse/storage/schema/delta/27/event_push_actions.sql new file mode 100644 index 0000000000..bdf6ae3f24 --- /dev/null +++ b/synapse/storage/schema/delta/27/event_push_actions.sql @@ -0,0 +1,26 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE IF NOT EXISTS event_push_actions( + room_id TEXT NOT NULL, + event_id TEXT NOT NULL, + user_id TEXT NOT NULL, + profile_tag VARCHAR(32), + actions TEXT NOT NULL, + CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag) +); + + +CREATE INDEX event_push_actions_room_id_event_id_user_id_profile_tag on event_push_actions(room_id, event_id, user_id, profile_tag); -- cgit 1.4.1 From 4eb7b950c829dd8463df8ccd1095772452293a15 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 4 Jan 2016 18:11:17 +0000 Subject: = not == in sql --- synapse/storage/event_push_actions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 016c0adf8a..3075d02257 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -75,7 +75,7 @@ class EventPushActionsStore(SQLBaseStore): " AND ea.room_id = ?" " AND (" " e.topological_ordering > ?" - " OR (e.topological_ordering == ? AND e.stream_ordering > ?)" + " OR (e.topological_ordering = ? AND e.stream_ordering > ?)" ")" ) txn.execute(sql, ( -- cgit 1.4.1 From cfd07aafff71b452a01265f304172f56b2c49759 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 5 Jan 2016 18:01:18 +0000 Subject: Allow guests to upgrade their accounts --- synapse/api/auth.py | 6 ++-- synapse/handlers/auth.py | 6 ++-- synapse/handlers/register.py | 37 +++++++++++++++++------ synapse/handlers/room.py | 2 +- synapse/handlers/sync.py | 2 +- synapse/rest/client/v2_alpha/register.py | 12 ++++++-- synapse/rest/media/v1/thumbnail_resource.py | 2 +- synapse/storage/prepare_database.py | 4 +-- synapse/storage/registration.py | 23 +++++++++----- synapse/storage/schema/delta/28/upgrade_times.sql | 21 +++++++++++++ tests/api/test_auth.py | 18 +++++------ 11 files changed, 93 insertions(+), 40 deletions(-) create mode 100644 synapse/storage/schema/delta/28/upgrade_times.sql (limited to 'synapse/storage') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index adb7d64482..b86c6c8399 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2014, 2015 OpenMarket Ltd +# Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -583,7 +583,7 @@ class Auth(object): AuthError if no user by that token exists or the token is invalid. """ try: - ret = yield self._get_user_from_macaroon(token) + ret = yield self.get_user_from_macaroon(token) except AuthError: # TODO(daniel): Remove this fallback when all existing access tokens # have been re-issued as macaroons. @@ -591,7 +591,7 @@ class Auth(object): defer.returnValue(ret) @defer.inlineCallbacks - def _get_user_from_macaroon(self, macaroon_str): + def get_user_from_macaroon(self, macaroon_str): try: macaroon = pymacaroons.Macaroon.deserialize(macaroon_str) self.validate_macaroon(macaroon, "access", False) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index e64b67cdfd..62e82a2570 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2014, 2015 OpenMarket Ltd +# Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -408,7 +408,7 @@ class AuthHandler(BaseHandler): macaroon = pymacaroons.Macaroon.deserialize(login_token) auth_api = self.hs.get_auth() auth_api.validate_macaroon(macaroon, "login", True) - return self._get_user_from_macaroon(macaroon) + return self.get_user_from_macaroon(macaroon) except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError): raise AuthError(401, "Invalid token", errcode=Codes.UNKNOWN_TOKEN) @@ -421,7 +421,7 @@ class AuthHandler(BaseHandler): macaroon.add_first_party_caveat("user_id = %s" % (user_id,)) return macaroon - def _get_user_from_macaroon(self, macaroon): + def get_user_from_macaroon(self, macaroon): user_prefix = "user_id = " for caveat in macaroon.caveats: if caveat.caveat_id.startswith(user_prefix): diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index baf7c14e40..6f111ff63e 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2014, 2015 OpenMarket Ltd +# Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -40,12 +40,13 @@ class RegistrationHandler(BaseHandler): def __init__(self, hs): super(RegistrationHandler, self).__init__(hs) + self.auth = hs.get_auth() self.distributor = hs.get_distributor() self.distributor.declare("registered_user") self.captcha_client = CaptchaServerHttpClient(hs) @defer.inlineCallbacks - def check_username(self, localpart): + def check_username(self, localpart, guest_access_token=None): yield run_on_reactor() if urllib.quote(localpart) != localpart: @@ -62,14 +63,29 @@ class RegistrationHandler(BaseHandler): users = yield self.store.get_users_by_id_case_insensitive(user_id) if users: - raise SynapseError( - 400, - "User ID already taken.", - errcode=Codes.USER_IN_USE, - ) + if not guest_access_token: + raise SynapseError( + 400, + "User ID already taken.", + errcode=Codes.USER_IN_USE, + ) + user_data = yield self.auth.get_user_from_macaroon(guest_access_token) + if not user_data["is_guest"] or user_data["user"].localpart != localpart: + raise AuthError( + 403, + "Cannot register taken user ID without valid guest " + "credentials for that user.", + errcode=Codes.FORBIDDEN, + ) @defer.inlineCallbacks - def register(self, localpart=None, password=None, generate_token=True): + def register( + self, + localpart=None, + password=None, + generate_token=True, + guest_access_token=None + ): """Registers a new client on the server. 
Args: @@ -89,7 +105,7 @@ class RegistrationHandler(BaseHandler): password_hash = self.auth_handler().hash(password) if localpart: - yield self.check_username(localpart) + yield self.check_username(localpart, guest_access_token=guest_access_token) user = UserID(localpart, self.hs.hostname) user_id = user.to_string() @@ -100,7 +116,8 @@ class RegistrationHandler(BaseHandler): yield self.store.register( user_id=user_id, token=token, - password_hash=password_hash + password_hash=password_hash, + was_guest=guest_access_token is not None, ) yield registered_user(self.distributor, user) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 0cfeda10d8..6186c37c7c 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2014, 2015 OpenMarket Ltd +# Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9796f2a57f..41a42418a9 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2015 OpenMarket Ltd +# Copyright 2015 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index b2b89652c6..25389ceded 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2015 OpenMarket Ltd +# Copyright 2015 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -119,8 +119,13 @@ class RegisterRestServlet(RestServlet): if self.hs.config.disable_registration: raise SynapseError(403, "Registration has been disabled") + guest_access_token = body.get("guest_access_token", None) + if desired_username is not None: - yield self.registration_handler.check_username(desired_username) + yield self.registration_handler.check_username( + desired_username, + guest_access_token=guest_access_token + ) if self.hs.config.enable_registration_captcha: flows = [ @@ -150,7 +155,8 @@ class RegisterRestServlet(RestServlet): (user_id, token) = yield self.registration_handler.register( localpart=desired_username, - password=new_password + password=new_password, + guest_access_token=guest_access_token, ) if result and LoginType.EMAIL_IDENTITY in result: diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 8b8fba3dc7..c18160534e 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2014, 2015 OpenMarket Ltd +# Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 16eff62544..c1f5f99789 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2014, 2015 OpenMarket Ltd +# Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 27 +SCHEMA_VERSION = 28 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 09a05b08ef..f0fa0bd33c 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2014, 2015 OpenMarket Ltd +# Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -73,30 +73,39 @@ class RegistrationStore(SQLBaseStore): ) @defer.inlineCallbacks - def register(self, user_id, token, password_hash): + def register(self, user_id, token, password_hash, was_guest=False): """Attempts to register an account. Args: user_id (str): The desired user ID to register. token (str): The desired access token to use for this user. password_hash (str): Optional. The password hash for this user. + was_guest (bool): Optional. Whether this is a guest account being + upgraded to a non-guest account. Raises: StoreError if the user_id could not be registered. """ yield self.runInteraction( "register", - self._register, user_id, token, password_hash + self._register, user_id, token, password_hash, was_guest ) - def _register(self, txn, user_id, token, password_hash): + def _register(self, txn, user_id, token, password_hash, was_guest): now = int(self.clock.time()) next_id = self._access_tokens_id_gen.get_next_txn(txn) try: - txn.execute("INSERT INTO users(name, password_hash, creation_ts) " - "VALUES (?,?,?)", - [user_id, password_hash, now]) + if was_guest: + txn.execute("UPDATE users SET" + " password_hash = ?," + " upgrade_ts = ?" + " WHERE name = ?", + [password_hash, now, user_id]) + else: + txn.execute("INSERT INTO users(name, password_hash, creation_ts) " + "VALUES (?,?,?)", + [user_id, password_hash, now]) except self.database_engine.module.IntegrityError: raise StoreError( 400, "User ID already taken.", errcode=Codes.USER_IN_USE diff --git a/synapse/storage/schema/delta/28/upgrade_times.sql b/synapse/storage/schema/delta/28/upgrade_times.sql new file mode 100644 index 0000000000..3e4a9ab455 --- /dev/null +++ b/synapse/storage/schema/delta/28/upgrade_times.sql @@ -0,0 +1,21 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Stores the timestamp when a user upgraded from a guest to a full user, if + * that happened. + */ + +ALTER TABLE users ADD COLUMN upgrade_ts BIGINT; diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 70d928defe..5ff4c8a873 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2015 OpenMarket Ltd +# Copyright 2015 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -154,7 +154,7 @@ class AuthTestCase(unittest.TestCase): macaroon.add_first_party_caveat("gen = 1") macaroon.add_first_party_caveat("type = access") macaroon.add_first_party_caveat("user_id = %s" % (user_id,)) - user_info = yield self.auth._get_user_from_macaroon(macaroon.serialize()) + user_info = yield self.auth.get_user_from_macaroon(macaroon.serialize()) user = user_info["user"] self.assertEqual(UserID.from_string(user_id), user) @@ -171,7 +171,7 @@ class AuthTestCase(unittest.TestCase): macaroon.add_first_party_caveat("guest = true") serialized = macaroon.serialize() - user_info = yield self.auth._get_user_from_macaroon(serialized) + user_info = yield self.auth.get_user_from_macaroon(serialized) user = user_info["user"] is_guest = user_info["is_guest"] self.assertEqual(UserID.from_string(user_id), user) @@ -192,7 +192,7 @@ class AuthTestCase(unittest.TestCase): macaroon.add_first_party_caveat("type = access") macaroon.add_first_party_caveat("user_id = %s" % (user,)) with self.assertRaises(AuthError) as cm: - yield self.auth._get_user_from_macaroon(macaroon.serialize()) + yield self.auth.get_user_from_macaroon(macaroon.serialize()) self.assertEqual(401, cm.exception.code) self.assertIn("User mismatch", cm.exception.msg) @@ -212,7 +212,7 @@ class AuthTestCase(unittest.TestCase): macaroon.add_first_party_caveat("type = access") with self.assertRaises(AuthError) as cm: - yield self.auth._get_user_from_macaroon(macaroon.serialize()) + yield self.auth.get_user_from_macaroon(macaroon.serialize()) self.assertEqual(401, cm.exception.code) self.assertIn("No user caveat", cm.exception.msg) @@ -234,7 +234,7 @@ class AuthTestCase(unittest.TestCase): macaroon.add_first_party_caveat("user_id = %s" % (user,)) with self.assertRaises(AuthError) as cm: - yield self.auth._get_user_from_macaroon(macaroon.serialize()) + yield self.auth.get_user_from_macaroon(macaroon.serialize()) self.assertEqual(401, cm.exception.code) self.assertIn("Invalid macaroon", cm.exception.msg) @@ -257,7 +257,7 @@ class AuthTestCase(unittest.TestCase): macaroon.add_first_party_caveat("cunning > fox") with self.assertRaises(AuthError) as cm: - yield self.auth._get_user_from_macaroon(macaroon.serialize()) + yield self.auth.get_user_from_macaroon(macaroon.serialize()) self.assertEqual(401, cm.exception.code) self.assertIn("Invalid macaroon", cm.exception.msg) @@ -285,11 +285,11 @@ class AuthTestCase(unittest.TestCase): self.hs.clock.now = 5000 # seconds - yield self.auth._get_user_from_macaroon(macaroon.serialize()) + yield self.auth.get_user_from_macaroon(macaroon.serialize()) # TODO(daniel): Turn on the check that we validate expiration, when we # validate expiration (and remove the above line, which will start # throwing). 
# with self.assertRaises(AuthError) as cm: - # yield self.auth._get_user_from_macaroon(macaroon.serialize()) + # yield self.auth.get_user_from_macaroon(macaroon.serialize()) # self.assertEqual(401, cm.exception.code) # self.assertIn("Invalid macaroon", cm.exception.msg) -- cgit 1.4.1 From c79f221192044203b9d32cfbd416a7fefeb34cd5 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 6 Jan 2016 11:38:09 +0000 Subject: Add is_guest flag to users db to track whether a user is a guest user or not. Use this so we can run _filter_events_for_client when calculating event_push_actions. --- synapse/handlers/_base.py | 8 ++--- synapse/handlers/federation.py | 6 ++-- synapse/handlers/register.py | 4 ++- synapse/push/action_generator.py | 6 ++-- synapse/push/bulk_push_rule_evaluator.py | 27 ++++++++++++--- synapse/rest/client/v2_alpha/register.py | 5 ++- synapse/storage/event_push_actions.py | 4 +-- synapse/storage/registration.py | 40 ++++++++++++++++------ .../storage/schema/delta/27/event_push_actions.sql | 26 -------------- .../storage/schema/delta/28/event_push_actions.sql | 26 ++++++++++++++ 10 files changed, 95 insertions(+), 57 deletions(-) delete mode 100644 synapse/storage/schema/delta/27/event_push_actions.sql create mode 100644 synapse/storage/schema/delta/28/event_push_actions.sql (limited to 'synapse/storage') diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 3115a5065d..66e35de6e4 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -23,8 +23,6 @@ from synapse.push.action_generator import ActionGenerator from synapse.util.logcontext import PreserveLoggingContext -from synapse.events.utils import serialize_event - import logging @@ -256,9 +254,9 @@ class BaseHandler(object): ) action_generator = ActionGenerator(self.store) - yield action_generator.handle_push_actions_for_event(serialize_event( - event, self.clock.time_msec() - )) + yield action_generator.handle_push_actions_for_event( + event, self + ) destinations = set(extra_destinations) for k, s in context.current_state.items(): diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 764709b424..075b9e21c3 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -32,7 +32,7 @@ from synapse.crypto.event_signing import ( ) from synapse.types import UserID -from synapse.events.utils import prune_event, serialize_event +from synapse.events.utils import prune_event from synapse.util.retryutils import NotRetryingDestination @@ -246,8 +246,8 @@ class FederationHandler(BaseHandler): if not backfilled and not event.internal_metadata.is_outlier(): action_generator = ActionGenerator(self.store) - yield action_generator.handle_push_actions_for_event(serialize_event( - event, self.clock.time_msec()) + yield action_generator.handle_push_actions_for_event( + event, self ) @defer.inlineCallbacks diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 6f111ff63e..1799a668c6 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -84,7 +84,8 @@ class RegistrationHandler(BaseHandler): localpart=None, password=None, generate_token=True, - guest_access_token=None + guest_access_token=None, + make_guest=False ): """Registers a new client on the server. 
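Two distinct flows now funnel through this one signature: minting a brand-new guest (make_guest=True, used by the guest registration servlet) and upgrading an existing guest into a full account (guest_access_token set). A toy, synchronous sketch of how the arguments select the flow; illustrative only, since the real method is a Twisted inlineCallbacks coroutine that hits the store:

def register(localpart=None, password=None, generate_token=True,
             guest_access_token=None, make_guest=False):
    # Hypothetical stand-in showing which argument combination picks which path.
    if make_guest:
        return "create guest account"
    if guest_access_token is not None:
        return "upgrade existing guest to a full account"
    return "create ordinary account"

assert register(make_guest=True, generate_token=False) == "create guest account"
assert register(localpart="bob", password="pw",
                guest_access_token="tok") == "upgrade existing guest to a full account"
assert register(localpart="carol", password="pw") == "create ordinary account"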
@@ -118,6 +119,7 @@ class RegistrationHandler(BaseHandler): token=token, password_hash=password_hash, was_guest=guest_access_token is not None, + make_guest=make_guest ) yield registered_user(self.distributor, user) diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index 5526324a6d..bcd40798f9 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -33,12 +33,12 @@ class ActionGenerator: # tag (ie. we just need all the users). @defer.inlineCallbacks - def handle_push_actions_for_event(self, event): + def handle_push_actions_for_event(self, event, handler): bulk_evaluator = yield bulk_push_rule_evaluator.evaluator_for_room_id( - event['room_id'], self.store + event.room_id, self.store ) - actions_by_user = bulk_evaluator.action_for_event_by_user(event) + actions_by_user = yield bulk_evaluator.action_for_event_by_user(event, handler) yield self.store.set_push_actions_for_event_and_users( event, diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index c00acfd87e..63d65b4465 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -23,6 +23,8 @@ from synapse.types import UserID import baserules from push_rule_evaluator import PushRuleEvaluator +from synapse.events.utils import serialize_event + logger = logging.getLogger(__name__) @@ -54,7 +56,7 @@ def evaluator_for_room_id(room_id, store): display_names[ev.state_key] = ev.content.get("displayname") defer.returnValue(BulkPushRuleEvaluator( - room_id, rules_by_user, display_names, users + room_id, rules_by_user, display_names, users, store )) @@ -67,13 +69,15 @@ class BulkPushRuleEvaluator: the same logic to run the actual rules, but could be optimised further (see https://matrix.org/jira/browse/SYN-562) """ - def __init__(self, room_id, rules_by_user, display_names, users_in_room): + def __init__(self, room_id, rules_by_user, display_names, users_in_room, store): self.room_id = room_id self.rules_by_user = rules_by_user self.display_names = display_names self.users_in_room = users_in_room + self.store = store - def action_for_event_by_user(self, event): + @defer.inlineCallbacks + def action_for_event_by_user(self, event, handler): actions_by_user = {} for uid, rules in self.rules_by_user.items(): @@ -81,6 +85,13 @@ class BulkPushRuleEvaluator: if uid in self.display_names: display_name = self.display_names[uid] + is_guest = yield self.store.is_guest(UserID.from_string(uid)) + filtered = yield handler._filter_events_for_client( + uid, [event], is_guest=is_guest + ) + if len(filtered) == 0: + continue + for rule in rules: if 'enabled' in rule and not rule['enabled']: continue @@ -94,14 +105,20 @@ class BulkPushRuleEvaluator: if len(actions) > 0: actions_by_user[uid] = actions break - return actions_by_user + defer.returnValue(actions_by_user) @staticmethod def event_matches_rule(event, rule, display_name, room_member_count, profile_tag): matches = True + + # passing the clock all the way into here is extremely awkward and push + # rules do not care about any of the relative timestamps, so we just + # pass 0 for the current time. 
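+ # (serialize_event converts the internal event object into the
+ # client-format dict that _event_fulfills_condition expects to
+ # match against.)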
+ client_event = serialize_event(event, 0)
+
 for cond in rule['conditions']:
 matches &= PushRuleEvaluator._event_fulfills_condition(
- event, cond, display_name, room_member_count, profile_tag
+ client_event, cond, display_name, room_member_count, profile_tag
 )
 return matches
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 25389ceded..c4d025b465 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -259,7 +259,10 @@ class RegisterRestServlet(RestServlet):
 def _do_guest_registration(self):
 if not self.hs.config.allow_guest_access:
 defer.returnValue((403, "Guest access is disabled"))
- user_id, _ = yield self.registration_handler.register(generate_token=False)
+ user_id, _ = yield self.registration_handler.register(
+ generate_token=False,
+ make_guest=True
+ )
 access_token = self.auth_handler.generate_access_token(user_id, ["guest = true"])
 defer.returnValue((200, {
 "user_id": user_id,
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index 3075d02257..0634af6b62 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -32,8 +32,8 @@ class EventPushActionsStore(SQLBaseStore):
 values = []
 for uid, profile_tag, actions in tuples:
 values.append({
- 'room_id': event['room_id'],
- 'event_id': event['event_id'],
+ 'room_id': event.room_id,
+ 'event_id': event.event_id,
 'user_id': uid,
 'profile_tag': profile_tag,
 'actions': json.dumps(actions)
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index f0fa0bd33c..c79066f774 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -18,7 +18,7 @@ from twisted.internet import defer
 from synapse.api.errors import StoreError, Codes
 from ._base import SQLBaseStore
-from synapse.util.caches.descriptors import cached
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 class RegistrationStore(SQLBaseStore):
@@ -73,7 +73,8 @@ class RegistrationStore(SQLBaseStore):
 )
 @defer.inlineCallbacks
- def register(self, user_id, token, password_hash, was_guest=False):
+ def register(self, user_id, token, password_hash,
+ was_guest=False, make_guest=False):
 """Attempts to register an account.
 Args:
@@ -82,15 +83,18 @@ class RegistrationStore(SQLBaseStore):
 password_hash (str): Optional. The password hash for this user.
 was_guest (bool): Optional. Whether this is a guest account being
 upgraded to a non-guest account.
+ make_guest (bool): True if the new user should be a guest,
+ False to add a regular user account.
 Raises:
 StoreError if the user_id could not be registered.
 """
 yield self.runInteraction(
 "register",
- self._register, user_id, token, password_hash, was_guest
+ self._register, user_id, token, password_hash, was_guest, make_guest
 )
+ self.is_guest.invalidate((user_id,))
- def _register(self, txn, user_id, token, password_hash, was_guest):
+ def _register(self, txn, user_id, token, password_hash, was_guest, make_guest):
 now = int(self.clock.time())
 next_id = self._access_tokens_id_gen.get_next_txn(txn)
@@ -100,12 +104,14 @@ class RegistrationStore(SQLBaseStore):
 if was_guest:
 txn.execute("UPDATE users SET"
 " password_hash = ?,"
 " upgrade_ts = ?"
+ " is_guest = ?"
" WHERE name = ?", - [password_hash, now, user_id]) + [password_hash, now, make_guest, user_id]) else: - txn.execute("INSERT INTO users(name, password_hash, creation_ts) " - "VALUES (?,?,?)", - [user_id, password_hash, now]) + txn.execute("INSERT INTO users " + "(name, password_hash, creation_ts, is_guest) " + "VALUES (?,?,?,?)", + [user_id, password_hash, now, make_guest]) except self.database_engine.module.IntegrityError: raise StoreError( 400, "User ID already taken.", errcode=Codes.USER_IN_USE @@ -126,7 +132,7 @@ class RegistrationStore(SQLBaseStore): keyvalues={ "name": user_id, }, - retcols=["name", "password_hash"], + retcols=["name", "password_hash", "is_guest"], allow_none=True, ) @@ -136,7 +142,7 @@ class RegistrationStore(SQLBaseStore): """ def f(txn): sql = ( - "SELECT name, password_hash FROM users" + "SELECT name, password_hash, is_guest FROM users" " WHERE lower(name) = lower(?)" ) txn.execute(sql, (user_id,)) @@ -249,9 +255,21 @@ class RegistrationStore(SQLBaseStore): defer.returnValue(res if res else False) + @cachedInlineCallbacks() + def is_guest(self, user): + res = yield self._simple_select_one_onecol( + table="users", + keyvalues={"name": user.to_string()}, + retcol="is_guest", + allow_none=True, + desc="is_guest", + ) + + defer.returnValue(res if res else False) + def _query_for_auth(self, txn, token): sql = ( - "SELECT users.name, access_tokens.id as token_id" + "SELECT users.name, users.is_guest, access_tokens.id as token_id" " FROM users" " INNER JOIN access_tokens on users.name = access_tokens.user_id" " WHERE token = ?" diff --git a/synapse/storage/schema/delta/27/event_push_actions.sql b/synapse/storage/schema/delta/27/event_push_actions.sql deleted file mode 100644 index bdf6ae3f24..0000000000 --- a/synapse/storage/schema/delta/27/event_push_actions.sql +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS event_push_actions( - room_id TEXT NOT NULL, - event_id TEXT NOT NULL, - user_id TEXT NOT NULL, - profile_tag VARCHAR(32), - actions TEXT NOT NULL, - CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag) -); - - -CREATE INDEX event_push_actions_room_id_event_id_user_id_profile_tag on event_push_actions(room_id, event_id, user_id, profile_tag); diff --git a/synapse/storage/schema/delta/28/event_push_actions.sql b/synapse/storage/schema/delta/28/event_push_actions.sql new file mode 100644 index 0000000000..bdf6ae3f24 --- /dev/null +++ b/synapse/storage/schema/delta/28/event_push_actions.sql @@ -0,0 +1,26 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS event_push_actions(
+ room_id TEXT NOT NULL,
+ event_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ profile_tag VARCHAR(32),
+ actions TEXT NOT NULL,
+ CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag)
+);
+
+
+CREATE INDEX event_push_actions_room_id_event_id_user_id_profile_tag on event_push_actions(room_id, event_id, user_id, profile_tag);
-- cgit 1.4.1

From ae1262a241aa816b9e0f19e628afcc83229af64f Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 6 Jan 2016 11:58:20 +0000
Subject: Add schema change file for is_guest flag

---
 synapse/storage/schema/delta/28/users_is_guest.sql | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 synapse/storage/schema/delta/28/users_is_guest.sql

(limited to 'synapse/storage')

diff --git a/synapse/storage/schema/delta/28/users_is_guest.sql b/synapse/storage/schema/delta/28/users_is_guest.sql
new file mode 100644
index 0000000000..80792e85de
--- /dev/null
+++ b/synapse/storage/schema/delta/28/users_is_guest.sql
@@ -0,0 +1,22 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE users ADD is_guest admin SMALLINT DEFAULT 0 NOT NULL;
+/*
+ * NB: any guest users created between 27 and 28 will be incorrectly
+ * marked as not guests: we don't bother to fill these in correctly
+ * because guest access is not really complete in 27 anyway so it's
+ * very unlikely there will be any guest users created.
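+ *
+ * (The DEFAULT 0 NOT NULL means every pre-existing row is backfilled
+ * as a non-guest automatically when the column is added.)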
+ */ -- cgit 1.4.1 From 992928304f3c95f87a3297799965159d295432ea Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 6 Jan 2016 11:58:46 +0000 Subject: Delete notifications for redacted events --- synapse/push/action_generator.py | 7 +++++++ synapse/storage/event_push_actions.py | 12 ++++++++++++ 2 files changed, 19 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index bcd40798f9..4cf94f6c61 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -19,6 +19,8 @@ import bulk_push_rule_evaluator import logging +from synapse.api.constants import EventTypes + logger = logging.getLogger(__name__) @@ -34,6 +36,11 @@ class ActionGenerator: @defer.inlineCallbacks def handle_push_actions_for_event(self, event, handler): + if event.type == EventTypes.Redaction and event.redacts is not None: + yield self.store.remove_push_actions_for_event_id( + event.room_id, event.redacts + ) + bulk_evaluator = yield bulk_push_rule_evaluator.evaluator_for_room_id( event.room_id, self.store ) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 0634af6b62..5b44431ab9 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -93,6 +93,18 @@ class EventPushActionsStore(SQLBaseStore): ) defer.returnValue(ret) + @defer.inlineCallbacks + def remove_push_actions_for_event_id(self, room_id, event_id): + def f(txn): + txn.execute( + "DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?", + (room_id, event_id) + ) + yield self.runInteraction( + "remove_push_actions_for_event_id", + f + ) + class EventPushActionsTable(object): table_name = "event_push_actions" -- cgit 1.4.1 From 5880de186b2cd5708189c3a6e9a847174fedd495 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Wed, 6 Jan 2016 13:52:16 +0000 Subject: Log when we skip daily messages --- synapse/storage/events.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index fc5725097c..ab500d728a 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -936,6 +936,7 @@ class EventsStore(SQLBaseStore): ) now_reporting = self.cursor_to_dict(txn) if not now_reporting: + logger.info("Calculating daily messages skipped; no now_reporting") return None now_reporting = now_reporting[0]["stream_ordering"] @@ -948,11 +949,18 @@ class EventsStore(SQLBaseStore): ) if not last_reported: + logger.info("Calculating daily messages skipped; no last_reported") return None # Close enough to correct for our purposes. 
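+ # (I.e. we only take a reading if the previous one was made within
+ # an hour of exactly twenty-four hours ago.)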
yesterday = (now - 24 * 60 * 60) - if math.fabs(yesterday - last_reported[0]["reported_time"]) > 60 * 60: + since_yesterday_seconds = yesterday - last_reported[0]["reported_time"] + any_since_yesterday = math.fabs(since_yesterday_seconds) > 60 * 60 + if any_since_yesterday: + logger.info( + "Calculating daily messages skipped; since_yesterday_seconds: %d" % + (since_yesterday_seconds,) + ) return None txn.execute( @@ -968,6 +976,7 @@ class EventsStore(SQLBaseStore): ) rows = self.cursor_to_dict(txn) if not rows: + logger.info("Calculating daily messages skipped; messages count missing") return None return rows[0]["messages"] -- cgit 1.4.1 From b6a585348ae8a07dc8105242e182435a240e6b8f Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 6 Jan 2016 17:16:02 +0000 Subject: Adding is_guest here won't work because it just constructs a dict of uid -> password hash --- synapse/storage/registration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index c79066f774..a52b67013b 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -142,7 +142,7 @@ class RegistrationStore(SQLBaseStore): """ def f(txn): sql = ( - "SELECT name, password_hash, is_guest FROM users" + "SELECT name, password_hash FROM users" " WHERE lower(name) = lower(?)" ) txn.execute(sql, (user_id,)) -- cgit 1.4.1 From 6c28ac260c2ce4bf93737e53ea3297bff08924c7 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Thu, 7 Jan 2016 04:26:29 +0000 Subject: copyrights --- contrib/cmdclient/console.py | 2 +- contrib/cmdclient/http.py | 2 +- contrib/experiments/cursesio.py | 2 +- contrib/experiments/test_messaging.py | 2 +- contrib/graph/graph.py | 2 +- contrib/graph/graph2.py | 2 +- scripts-dev/copyrighter-sql.pl | 4 ++-- scripts-dev/copyrighter.pl | 4 ++-- scripts/register_new_matrix_user | 2 +- scripts/synapse_port_db | 2 +- setup.py | 2 +- synapse/__init__.py | 2 +- synapse/api/__init__.py | 2 +- synapse/api/constants.py | 2 +- synapse/api/errors.py | 2 +- synapse/api/filtering.py | 2 +- synapse/api/ratelimiting.py | 2 +- synapse/api/urls.py | 2 +- synapse/app/__init__.py | 2 +- synapse/app/homeserver.py | 2 +- synapse/app/synctl.py | 2 +- synapse/appservice/__init__.py | 2 +- synapse/appservice/api.py | 2 +- synapse/appservice/scheduler.py | 2 +- synapse/config/__init__.py | 2 +- synapse/config/__main__.py | 2 +- synapse/config/_base.py | 2 +- synapse/config/appservice.py | 2 +- synapse/config/captcha.py | 2 +- synapse/config/cas.py | 2 +- synapse/config/database.py | 2 +- synapse/config/homeserver.py | 2 +- synapse/config/key.py | 2 +- synapse/config/logger.py | 2 +- synapse/config/metrics.py | 2 +- synapse/config/password.py | 2 +- synapse/config/ratelimiting.py | 2 +- synapse/config/registration.py | 2 +- synapse/config/server.py | 2 +- synapse/config/tls.py | 2 +- synapse/config/voip.py | 2 +- synapse/crypto/__init__.py | 2 +- synapse/crypto/context_factory.py | 2 +- synapse/crypto/event_signing.py | 2 +- synapse/crypto/keyclient.py | 2 +- synapse/crypto/keyring.py | 2 +- synapse/events/__init__.py | 2 +- synapse/events/builder.py | 2 +- synapse/events/snapshot.py | 2 +- synapse/events/utils.py | 2 +- synapse/events/validator.py | 2 +- synapse/federation/__init__.py | 2 +- synapse/federation/federation_base.py | 2 +- synapse/federation/federation_client.py | 2 +- synapse/federation/federation_server.py | 2 +- synapse/federation/persistence.py | 2 +- synapse/federation/replication.py | 2 +- 
synapse/federation/transaction_queue.py | 2 +- synapse/federation/transport/__init__.py | 2 +- synapse/federation/transport/client.py | 2 +- synapse/federation/transport/server.py | 2 +- synapse/federation/units.py | 2 +- synapse/handlers/__init__.py | 2 +- synapse/handlers/account_data.py | 2 +- synapse/handlers/admin.py | 2 +- synapse/handlers/appservice.py | 2 +- synapse/handlers/directory.py | 2 +- synapse/handlers/events.py | 2 +- synapse/handlers/federation.py | 2 +- synapse/handlers/identity.py | 2 +- synapse/handlers/presence.py | 2 +- synapse/handlers/profile.py | 2 +- synapse/handlers/receipts.py | 2 +- synapse/handlers/search.py | 2 +- synapse/handlers/typing.py | 2 +- synapse/http/__init__.py | 2 +- synapse/http/client.py | 2 +- synapse/http/endpoint.py | 2 +- synapse/http/matrixfederationclient.py | 2 +- synapse/http/server.py | 2 +- synapse/http/servlet.py | 2 +- synapse/metrics/__init__.py | 2 +- synapse/metrics/metric.py | 2 +- synapse/metrics/resource.py | 2 +- synapse/push/__init__.py | 2 +- synapse/push/baserules.py | 2 +- synapse/push/httppusher.py | 2 +- synapse/push/push_rule_evaluator.py | 2 +- synapse/push/pusherpool.py | 2 +- synapse/push/rulekinds.py | 2 +- synapse/python_dependencies.py | 2 +- synapse/rest/__init__.py | 2 +- synapse/rest/client/__init__.py | 2 +- synapse/rest/client/v1/__init__.py | 2 +- synapse/rest/client/v1/admin.py | 2 +- synapse/rest/client/v1/base.py | 2 +- synapse/rest/client/v1/directory.py | 2 +- synapse/rest/client/v1/events.py | 2 +- synapse/rest/client/v1/initial_sync.py | 2 +- synapse/rest/client/v1/login.py | 2 +- synapse/rest/client/v1/presence.py | 2 +- synapse/rest/client/v1/profile.py | 2 +- synapse/rest/client/v1/push_rule.py | 2 +- synapse/rest/client/v1/pusher.py | 2 +- synapse/rest/client/v1/register.py | 2 +- synapse/rest/client/v1/room.py | 2 +- synapse/rest/client/v1/transactions.py | 2 +- synapse/rest/client/v1/voip.py | 2 +- synapse/rest/client/v2_alpha/__init__.py | 2 +- synapse/rest/client/v2_alpha/_base.py | 2 +- synapse/rest/client/v2_alpha/account.py | 2 +- synapse/rest/client/v2_alpha/account_data.py | 2 +- synapse/rest/client/v2_alpha/auth.py | 2 +- synapse/rest/client/v2_alpha/filter.py | 2 +- synapse/rest/client/v2_alpha/keys.py | 2 +- synapse/rest/client/v2_alpha/receipts.py | 2 +- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/rest/client/v2_alpha/tags.py | 2 +- synapse/rest/client/v2_alpha/tokenrefresh.py | 2 +- synapse/rest/key/__init__.py | 2 +- synapse/rest/key/v1/__init__.py | 2 +- synapse/rest/key/v1/server_key_resource.py | 2 +- synapse/rest/key/v2/__init__.py | 2 +- synapse/rest/key/v2/local_key_resource.py | 2 +- synapse/rest/key/v2/remote_key_resource.py | 2 +- synapse/rest/media/v0/content_repository.py | 2 +- synapse/rest/media/v1/__init__.py | 2 +- synapse/rest/media/v1/base_resource.py | 2 +- synapse/rest/media/v1/download_resource.py | 2 +- synapse/rest/media/v1/filepath.py | 2 +- synapse/rest/media/v1/identicon_resource.py | 2 +- synapse/rest/media/v1/media_repository.py | 2 +- synapse/rest/media/v1/thumbnailer.py | 2 +- synapse/rest/media/v1/upload_resource.py | 2 +- synapse/server.py | 2 +- synapse/state.py | 2 +- synapse/storage/__init__.py | 2 +- synapse/storage/_base.py | 2 +- synapse/storage/account_data.py | 2 +- synapse/storage/appservice.py | 2 +- synapse/storage/background_updates.py | 2 +- synapse/storage/directory.py | 2 +- synapse/storage/end_to_end_keys.py | 2 +- synapse/storage/engines/__init__.py | 2 +- synapse/storage/engines/_base.py | 2 +- 
synapse/storage/engines/postgres.py | 2 +- synapse/storage/engines/sqlite3.py | 2 +- synapse/storage/event_federation.py | 2 +- synapse/storage/events.py | 2 +- synapse/storage/filtering.py | 2 +- synapse/storage/keys.py | 2 +- synapse/storage/media_repository.py | 2 +- synapse/storage/presence.py | 2 +- synapse/storage/profile.py | 2 +- synapse/storage/push_rule.py | 2 +- synapse/storage/pusher.py | 2 +- synapse/storage/receipts.py | 2 +- synapse/storage/rejections.py | 2 +- synapse/storage/room.py | 2 +- synapse/storage/roommember.py | 2 +- synapse/storage/schema/delta/11/v11.sql | 2 +- synapse/storage/schema/delta/12/v12.sql | 2 +- synapse/storage/schema/delta/13/v13.sql | 2 +- synapse/storage/schema/delta/14/upgrade_appservice_db.py | 2 +- synapse/storage/schema/delta/14/v14.sql | 2 +- synapse/storage/schema/delta/15/appservice_txns.sql | 2 +- synapse/storage/schema/delta/17/drop_indexes.sql | 2 +- synapse/storage/schema/delta/17/server_keys.sql | 2 +- synapse/storage/schema/delta/18/server_keys_bigger_ints.sql | 2 +- synapse/storage/schema/delta/19/event_index.sql | 2 +- synapse/storage/schema/delta/20/pushers.py | 2 +- synapse/storage/schema/delta/21/end_to_end_keys.sql | 2 +- synapse/storage/schema/delta/21/receipts.sql | 2 +- synapse/storage/schema/delta/22/receipts_index.sql | 2 +- synapse/storage/schema/delta/23/drop_state_index.sql | 2 +- synapse/storage/schema/delta/23/refresh_tokens.sql | 2 +- synapse/storage/schema/delta/24/stats_reporting.sql | 2 +- synapse/storage/schema/delta/25/00background_updates.sql | 2 +- synapse/storage/schema/delta/25/fts.py | 2 +- synapse/storage/schema/delta/25/guest_access.sql | 2 +- synapse/storage/schema/delta/25/history_visibility.sql | 2 +- synapse/storage/schema/delta/25/tags.sql | 2 +- synapse/storage/schema/delta/26/account_data.sql | 2 +- synapse/storage/schema/delta/27/account_data.sql | 2 +- synapse/storage/schema/delta/27/forgotten_memberships.sql | 2 +- synapse/storage/schema/delta/27/ts.py | 2 +- synapse/storage/schema/full_schemas/11/event_edges.sql | 2 +- synapse/storage/schema/full_schemas/11/event_signatures.sql | 2 +- synapse/storage/schema/full_schemas/11/im.sql | 2 +- synapse/storage/schema/full_schemas/11/keys.sql | 2 +- synapse/storage/schema/full_schemas/11/media_repository.sql | 2 +- synapse/storage/schema/full_schemas/11/presence.sql | 2 +- synapse/storage/schema/full_schemas/11/profiles.sql | 2 +- synapse/storage/schema/full_schemas/11/redactions.sql | 2 +- synapse/storage/schema/full_schemas/11/room_aliases.sql | 2 +- synapse/storage/schema/full_schemas/11/state.sql | 2 +- synapse/storage/schema/full_schemas/11/transactions.sql | 2 +- synapse/storage/schema/full_schemas/11/users.sql | 2 +- synapse/storage/schema/full_schemas/16/application_services.sql | 2 +- synapse/storage/schema/full_schemas/16/event_edges.sql | 2 +- synapse/storage/schema/full_schemas/16/event_signatures.sql | 2 +- synapse/storage/schema/full_schemas/16/im.sql | 2 +- synapse/storage/schema/full_schemas/16/keys.sql | 2 +- synapse/storage/schema/full_schemas/16/media_repository.sql | 2 +- synapse/storage/schema/full_schemas/16/presence.sql | 2 +- synapse/storage/schema/full_schemas/16/profiles.sql | 2 +- synapse/storage/schema/full_schemas/16/push.sql | 2 +- synapse/storage/schema/full_schemas/16/redactions.sql | 2 +- synapse/storage/schema/full_schemas/16/room_aliases.sql | 2 +- synapse/storage/schema/full_schemas/16/state.sql | 2 +- synapse/storage/schema/full_schemas/16/transactions.sql | 2 +- synapse/storage/schema/full_schemas/16/users.sql | 2 +- 
synapse/storage/schema/schema_version.sql | 2 +- synapse/storage/search.py | 2 +- synapse/storage/signatures.py | 2 +- synapse/storage/state.py | 2 +- synapse/storage/stream.py | 2 +- synapse/storage/tags.py | 2 +- synapse/storage/transactions.py | 2 +- synapse/storage/util/__init__.py | 2 +- synapse/storage/util/id_generators.py | 2 +- synapse/streams/__init__.py | 2 +- synapse/streams/config.py | 2 +- synapse/streams/events.py | 2 +- synapse/types.py | 2 +- synapse/util/__init__.py | 2 +- synapse/util/async.py | 2 +- synapse/util/caches/__init__.py | 2 +- synapse/util/caches/descriptors.py | 2 +- synapse/util/caches/dictionary_cache.py | 2 +- synapse/util/caches/expiringcache.py | 2 +- synapse/util/caches/lrucache.py | 2 +- synapse/util/caches/snapshot_cache.py | 2 +- synapse/util/debug.py | 2 +- synapse/util/distributor.py | 2 +- synapse/util/frozenutils.py | 2 +- synapse/util/jsonobject.py | 2 +- synapse/util/logcontext.py | 2 +- synapse/util/logutils.py | 2 +- synapse/util/ratelimitutils.py | 2 +- synapse/util/retryutils.py | 2 +- synapse/util/stringutils.py | 2 +- tests/__init__.py | 2 +- tests/api/test_filtering.py | 2 +- tests/appservice/__init__.py | 2 +- tests/appservice/test_appservice.py | 2 +- tests/appservice/test_scheduler.py | 2 +- tests/crypto/__init__.py | 2 +- tests/crypto/test_event_signing.py | 2 +- tests/events/test_utils.py | 2 +- tests/federation/test_federation.py | 2 +- tests/handlers/test_appservice.py | 2 +- tests/handlers/test_auth.py | 2 +- tests/handlers/test_directory.py | 2 +- tests/handlers/test_federation.py | 2 +- tests/handlers/test_presence.py | 2 +- tests/handlers/test_presencelike.py | 2 +- tests/handlers/test_profile.py | 2 +- tests/handlers/test_room.py | 2 +- tests/handlers/test_typing.py | 2 +- tests/metrics/test_metric.py | 2 +- tests/rest/__init__.py | 2 +- tests/rest/client/__init__.py | 2 +- tests/rest/client/v1/__init__.py | 2 +- tests/rest/client/v1/test_events.py | 2 +- tests/rest/client/v1/test_presence.py | 2 +- tests/rest/client/v1/test_profile.py | 2 +- tests/rest/client/v1/test_rooms.py | 2 +- tests/rest/client/v1/test_typing.py | 2 +- tests/rest/client/v1/utils.py | 2 +- tests/rest/client/v2_alpha/__init__.py | 2 +- tests/rest/client/v2_alpha/test_filter.py | 2 +- tests/storage/event_injector.py | 2 +- tests/storage/test__base.py | 2 +- tests/storage/test_appservice.py | 2 +- tests/storage/test_base.py | 2 +- tests/storage/test_directory.py | 2 +- tests/storage/test_events.py | 2 +- tests/storage/test_presence.py | 2 +- tests/storage/test_profile.py | 2 +- tests/storage/test_redaction.py | 2 +- tests/storage/test_registration.py | 2 +- tests/storage/test_room.py | 2 +- tests/storage/test_roommember.py | 2 +- tests/storage/test_stream.py | 2 +- tests/test_distributor.py | 2 +- tests/test_state.py | 2 +- tests/test_test_utils.py | 2 +- tests/test_types.py | 2 +- tests/unittest.py | 2 +- tests/util/__init__.py | 2 +- tests/util/test_dict_cache.py | 2 +- tests/util/test_lrucache.py | 2 +- tests/util/test_snapshot_cache.py | 2 +- tests/utils.py | 2 +- 295 files changed, 297 insertions(+), 297 deletions(-) (limited to 'synapse/storage') diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py index d9c6ec6a70..8bb03ce66a 100755 --- a/contrib/cmdclient/console.py +++ b/contrib/cmdclient/console.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2014 OpenMarket Ltd +# Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with 
the License. diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py index 869f782ec1..4186897316 100644 --- a/contrib/cmdclient/http.py +++ b/contrib/cmdclient/http.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2014 OpenMarket Ltd +# Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/experiments/cursesio.py b/contrib/experiments/cursesio.py index 95d87a1fda..44afe81008 100644 --- a/contrib/experiments/cursesio.py +++ b/contrib/experiments/cursesio.py @@ -1,4 +1,4 @@ -# Copyright 2014 OpenMarket Ltd +# Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/experiments/test_messaging.py b/contrib/experiments/test_messaging.py index fedf786cec..85c9c11984 100644 --- a/contrib/experiments/test_messaging.py +++ b/contrib/experiments/test_messaging.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2014 OpenMarket Ltd +# Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py index b2acadcf5e..afd1d446b4 100644 --- a/contrib/graph/graph.py +++ b/contrib/graph/graph.py @@ -1,4 +1,4 @@ -# Copyright 2014 OpenMarket Ltd +# Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/graph/graph2.py b/contrib/graph/graph2.py index d0d2cfe7c0..1ccad65728 100644 --- a/contrib/graph/graph2.py +++ b/contrib/graph/graph2.py @@ -1,4 +1,4 @@ -# Copyright 2014 OpenMarket Ltd +# Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts-dev/copyrighter-sql.pl b/scripts-dev/copyrighter-sql.pl index 890e51e587..13e630fc11 100755 --- a/scripts-dev/copyrighter-sql.pl +++ b/scripts-dev/copyrighter-sql.pl @@ -1,5 +1,5 @@ #!/usr/bin/perl -pi -# Copyright 2015 OpenMarket Ltd +# Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # limitations under the License. $copyright = < Date: Thu, 7 Jan 2016 10:15:35 +0000 Subject: This comma is actually important --- synapse/storage/registration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index a52b67013b..ece71f2ee8 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -103,7 +103,7 @@ class RegistrationStore(SQLBaseStore): if was_guest: txn.execute("UPDATE users SET" " password_hash = ?," - " upgrade_ts = ?" + " upgrade_ts = ?," " is_guest = ?" 
" WHERE name = ?", [password_hash, now, make_guest, user_id]) -- cgit 1.4.1 From fe56138142f386c927e0221414f223428d7c000d Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 11 Jan 2016 17:09:03 +0000 Subject: Remove rogue 'admin' --- synapse/storage/schema/delta/28/users_is_guest.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/schema/delta/28/users_is_guest.sql b/synapse/storage/schema/delta/28/users_is_guest.sql index 80792e85de..21d2b420bf 100644 --- a/synapse/storage/schema/delta/28/users_is_guest.sql +++ b/synapse/storage/schema/delta/28/users_is_guest.sql @@ -13,7 +13,7 @@ * limitations under the License. */ -ALTER TABLE users ADD is_guest admin SMALLINT DEFAULT 0 NOT NULL; +ALTER TABLE users ADD is_guest SMALLINT DEFAULT 0 NOT NULL; /* * NB: any guest users created between 27 and 28 will be incorrectly * marked as not guests: we don't bother to fill these in correctly -- cgit 1.4.1 From b5d33a656f7045caa946d4bb25990a8c704dbb29 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 11 Jan 2016 17:13:52 +0000 Subject: Postgres doesn't like booleans --- synapse/storage/registration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index ece71f2ee8..999b710fbb 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -106,12 +106,12 @@ class RegistrationStore(SQLBaseStore): " upgrade_ts = ?," " is_guest = ?" " WHERE name = ?", - [password_hash, now, make_guest, user_id]) + [password_hash, now, 1 if make_guest else 0, user_id]) else: txn.execute("INSERT INTO users " "(name, password_hash, creation_ts, is_guest) " "VALUES (?,?,?,?)", - [user_id, password_hash, now, make_guest]) + [user_id, password_hash, now, 1 if make_guest else 0]) except self.database_engine.module.IntegrityError: raise StoreError( 400, "User ID already taken.", errcode=Codes.USER_IN_USE -- cgit 1.4.1 From 31de2953a35ab9537b82e712eea78495cdd33712 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 12 Jan 2016 14:22:24 +0000 Subject: Remove the PushRuleTable and PushRuleEnableTable objects --- synapse/storage/push_rule.py | 83 ++++++++++++++++++-------------------------- 1 file changed, 33 insertions(+), 50 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 0829262f42..a4dde1aac0 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -27,11 +27,14 @@ class PushRuleStore(SQLBaseStore): @cachedInlineCallbacks() def get_push_rules_for_user(self, user_name): rows = yield self._simple_select_list( - table=PushRuleTable.table_name, + table="push_rules", keyvalues={ "user_name": user_name, }, - retcols=PushRuleTable.fields, + retcols=( + "user_name", "rule_id", "priority_class", "priority", + "conditions", "actions", + ), desc="get_push_rules_enabled_for_user", ) @@ -44,11 +47,13 @@ class PushRuleStore(SQLBaseStore): @cachedInlineCallbacks() def get_push_rules_enabled_for_user(self, user_name): results = yield self._simple_select_list( - table=PushRuleEnableTable.table_name, + table="push_rules_enable", keyvalues={ 'user_name': user_name }, - retcols=PushRuleEnableTable.fields, + retcols=( + "user_name", "rule_id", "enabled", + ), desc="get_push_rules_enabled_for_user", ) defer.returnValue({ @@ -61,14 +66,15 @@ class PushRuleStore(SQLBaseStore): def f(txn, user_ids_to_fetch): sql = ( - "SELECT " + - ",".join("pr."+x 
for x in PushRuleTable.fields) +
- " FROM " + PushRuleTable.table_name + " pr " +
- " LEFT JOIN " + PushRuleEnableTable.table_name + " pre " +
- " ON pr.user_name = pre.user_name and pr.rule_id = pre.rule_id " +
- " WHERE pr.user_name " +
+ "SELECT"
+ " pr.user_name, pr.rule_id, priority_class, priority,"
+ " conditions, actions"
+ " FROM push_rules AS pr"
+ " LEFT JOIN push_rules_enable AS pre"
+ " ON pr.user_name = pre.user_name AND pr.rule_id = pre.rule_id"
+ " WHERE pr.user_name"
 " IN (" + ",".join("?" for _ in user_ids_to_fetch) + ")"
- " AND (pre.enabled is null or pre.enabled = 1)"
+ " AND (pre.enabled IS NULL OR pre.enabled = 1)"
 " ORDER BY pr.user_name, pr.priority_class DESC, pr.priority DESC"
 )
 txn.execute(sql, user_ids_to_fetch)
@@ -86,14 +92,15 @@ class PushRuleStore(SQLBaseStore):
 "bulk_get_push_rules", f, batch_user_ids
 )
- for r in rows:
- rawdict = {
- PushRuleTable.fields[i]: r[i] for i in range(len(r))
- }
+ cols = (
+ "user_name", "rule_id", "priority_class", "priority",
+ "conditions", "actions",
+ )
+
+ for row in rows:
+ rawdict = dict(zip(cols, row))
+ results.setdefault(rawdict["user_name"], []).append(rawdict)

- if rawdict['user_name'] not in results:
- results[rawdict['user_name']] = []
- results[rawdict['user_name']].append(rawdict)
 defer.returnValue(results)

 @defer.inlineCallbacks
@@ -131,7 +138,7 @@ class PushRuleStore(SQLBaseStore):
 res = self._simple_select_one_txn(
 txn,
- table=PushRuleTable.table_name,
+ table="push_rules",
 keyvalues={
 "user_name": user_name,
 "rule_id": relative_to_rule,
@@ -170,7 +177,7 @@ class PushRuleStore(SQLBaseStore):
 new_rule['priority'] = new_rule_priority

 sql = (
- "SELECT COUNT(*) FROM " + PushRuleTable.table_name +
+ "SELECT COUNT(*) FROM push_rules"
 " WHERE user_name = ? AND priority_class = ? AND priority = ?"
 )
 txn.execute(sql, (user_name, priority_class, new_rule_priority))
@@ -179,7 +186,7 @@
 # if there are conflicting rules, bump everything
 if num_conflicting:
- sql = "UPDATE "+PushRuleTable.table_name+" SET priority = priority "
+ sql = "UPDATE push_rules SET priority = priority "
 if after:
 sql += "-1"
 else:
@@ -202,7 +209,7 @@
 self._simple_insert_txn(
 txn,
- table=PushRuleTable.table_name,
+ table="push_rules",
 values=new_rule,
 )

@@ -210,7 +217,7 @@
 priority_class, **kwargs):
 # find the highest priority rule in that class
 sql = (
- "SELECT COUNT(*), MAX(priority) FROM " + PushRuleTable.table_name +
+ "SELECT COUNT(*), MAX(priority) FROM push_rules"
 " WHERE user_name = ? and priority_class = ?"
) txn.execute(sql, (user_name, priority_class)) @@ -237,7 +244,7 @@ class PushRuleStore(SQLBaseStore): self._simple_insert_txn( txn, - table=PushRuleTable.table_name, + table="push_rules", values=new_rule, ) @@ -253,7 +260,7 @@ class PushRuleStore(SQLBaseStore): rule_id (str): The rule_id of the rule to be deleted """ yield self._simple_delete_one( - PushRuleTable.table_name, + "push_rules", {'user_name': user_name, 'rule_id': rule_id}, desc="delete_push_rule", ) @@ -274,7 +281,7 @@ class PushRuleStore(SQLBaseStore): new_id = self._push_rules_enable_id_gen.get_next_txn(txn) self._simple_upsert_txn( txn, - PushRuleEnableTable.table_name, + "push_rules_enable", {'user_name': user_name, 'rule_id': rule_id}, {'enabled': 1 if enabled else 0}, {'id': new_id}, @@ -293,27 +300,3 @@ class RuleNotFoundException(Exception): class InconsistentRuleException(Exception): pass - - -class PushRuleTable(object): - table_name = "push_rules" - - fields = [ - "id", - "user_name", - "rule_id", - "priority_class", - "priority", - "conditions", - "actions", - ] - - -class PushRuleEnableTable(object): - table_name = "push_rules_enable" - - fields = [ - "user_name", - "rule_id", - "enabled" - ] -- cgit 1.4.1 From a8e9e0b916b6d4e4ff739d28d9d036e8aab2fbbb Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 12 Jan 2016 14:41:26 +0000 Subject: Remove the PushersTable and EventPushActionsTable objects --- synapse/storage/event_push_actions.py | 6 +----- synapse/storage/pusher.py | 14 +++++--------- 2 files changed, 6 insertions(+), 14 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 5b44431ab9..d99171ee87 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -42,7 +42,7 @@ class EventPushActionsStore(SQLBaseStore): yield self.runInteraction( "set_actions_for_event_and_users", self._simple_insert_many_txn, - EventPushActionsTable.table_name, + "event_push_actions", values ) @@ -104,7 +104,3 @@ class EventPushActionsStore(SQLBaseStore): "remove_push_actions_for_event_id", f ) - - -class EventPushActionsTable(object): - table_name = "event_push_actions" diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index b9568dad26..2b90d6c622 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -86,7 +86,7 @@ class PusherStore(SQLBaseStore): try: next_id = yield self._pushers_id_gen.get_next() yield self._simple_upsert( - PushersTable.table_name, + "pushers", dict( app_id=app_id, pushkey=pushkey, @@ -114,7 +114,7 @@ class PusherStore(SQLBaseStore): @defer.inlineCallbacks def delete_pusher_by_app_id_pushkey_user_name(self, app_id, pushkey, user_name): yield self._simple_delete_one( - PushersTable.table_name, + "pushers", {"app_id": app_id, "pushkey": pushkey, 'user_name': user_name}, desc="delete_pusher_by_app_id_pushkey_user_name", ) @@ -122,7 +122,7 @@ class PusherStore(SQLBaseStore): @defer.inlineCallbacks def update_pusher_last_token(self, app_id, pushkey, user_name, last_token): yield self._simple_update_one( - PushersTable.table_name, + "pushers", {'app_id': app_id, 'pushkey': pushkey, 'user_name': user_name}, {'last_token': last_token}, desc="update_pusher_last_token", @@ -132,7 +132,7 @@ class PusherStore(SQLBaseStore): def update_pusher_last_token_and_success(self, app_id, pushkey, user_name, last_token, last_success): yield self._simple_update_one( - PushersTable.table_name, + "pushers", {'app_id': app_id, 'pushkey': pushkey, 'user_name': user_name}, 
{'last_token': last_token, 'last_success': last_success}, desc="update_pusher_last_token_and_success", @@ -142,12 +142,8 @@ class PusherStore(SQLBaseStore): def update_pusher_failing_since(self, app_id, pushkey, user_name, failing_since): yield self._simple_update_one( - PushersTable.table_name, + "pushers", {'app_id': app_id, 'pushkey': pushkey, 'user_name': user_name}, {'failing_since': failing_since}, desc="update_pusher_failing_since", ) - - -class PushersTable(object): - table_name = "pushers" -- cgit 1.4.1 From 96e400fee5bf821174730e1421a14e4aa94e3c0d Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 13 Jan 2016 11:07:32 +0000 Subject: Remove the RoomsTable object --- synapse/storage/room.py | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 390bd78654..dc09a3aaba 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -49,7 +49,7 @@ class RoomStore(SQLBaseStore): """ try: yield self._simple_insert( - RoomsTable.table_name, + "rooms", { "room_id": room_id, "creator": room_creator_user_id, @@ -70,9 +70,9 @@ class RoomStore(SQLBaseStore): A namedtuple containing the room information, or an empty list. """ return self._simple_select_one( - table=RoomsTable.table_name, + table="rooms", keyvalues={"room_id": room_id}, - retcols=RoomsTable.fields, + retcols=("room_id", "is_public", "creator"), desc="get_room", allow_none=True, ) @@ -275,13 +275,3 @@ class RoomStore(SQLBaseStore): aliases.extend(e.content['aliases']) defer.returnValue((name, aliases)) - - -class RoomsTable(object): - table_name = "rooms" - - fields = [ - "room_id", - "is_public", - "creator" - ] -- cgit 1.4.1 From c0a279e808435d286ae254f51253d2adb3ee7858 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 13 Jan 2016 11:15:20 +0000 Subject: Delete the table objects from TransactionStore --- synapse/storage/transactions.py | 68 ++++++----------------------------------- tests/handlers/test_presence.py | 1 - tests/handlers/test_typing.py | 1 - 3 files changed, 10 insertions(+), 60 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index b40a070b69..4475c451c1 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -16,8 +16,6 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cached -from collections import namedtuple - from canonicaljson import encode_canonical_json import logging @@ -50,12 +48,15 @@ class TransactionStore(SQLBaseStore): def _get_received_txn_response(self, txn, transaction_id, origin): result = self._simple_select_one_txn( txn, - table=ReceivedTransactionsTable.table_name, + table="received_transactions", keyvalues={ "transaction_id": transaction_id, "origin": origin, }, - retcols=ReceivedTransactionsTable.fields, + retcols=( + "transaction_id", "origin", "ts", "response_code", "response_json", + "has_been_referenced", + ), allow_none=True, ) @@ -79,7 +80,7 @@ class TransactionStore(SQLBaseStore): """ return self._simple_insert( - table=ReceivedTransactionsTable.table_name, + table="received_transactions", values={ "transaction_id": transaction_id, "origin": origin, @@ -136,7 +137,7 @@ class TransactionStore(SQLBaseStore): self._simple_insert_txn( txn, - table=SentTransactions.table_name, + table="sent_transactions", values={ "id": next_id, "transaction_id": transaction_id, @@ -171,7 +172,7 @@ class TransactionStore(SQLBaseStore): code, 
response_json): self._simple_update_one_txn( txn, - table=SentTransactions.table_name, + table="sent_transactions", keyvalues={ "transaction_id": transaction_id, "destination": destination, @@ -229,11 +230,11 @@ class TransactionStore(SQLBaseStore): def _get_destination_retry_timings(self, txn, destination): result = self._simple_select_one_txn( txn, - table=DestinationsTable.table_name, + table="destinations", keyvalues={ "destination": destination, }, - retcols=DestinationsTable.fields, + retcols=("destination", "retry_last_ts", "retry_interval"), allow_none=True, ) @@ -304,52 +305,3 @@ class TransactionStore(SQLBaseStore): txn.execute(query, (self._clock.time_msec(),)) return self.cursor_to_dict(txn) - - -class ReceivedTransactionsTable(object): - table_name = "received_transactions" - - fields = [ - "transaction_id", - "origin", - "ts", - "response_code", - "response_json", - "has_been_referenced", - ] - - -class SentTransactions(object): - table_name = "sent_transactions" - - fields = [ - "id", - "transaction_id", - "destination", - "ts", - "response_code", - "response_json", - ] - - EntryType = namedtuple("SentTransactionsEntry", fields) - - -class TransactionsToPduTable(object): - table_name = "transaction_id_to_pdu" - - fields = [ - "transaction_id", - "destination", - "pdu_id", - "pdu_origin", - ] - - -class DestinationsTable(object): - table_name = "destinations" - - fields = [ - "destination", - "retry_last_ts", - "retry_interval", - ] diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 15000aae0c..447a22b5fc 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -28,7 +28,6 @@ from synapse.api.constants import PresenceState from synapse.api.errors import SynapseError from synapse.handlers.presence import PresenceHandler, UserPresenceCache from synapse.streams.config import SourcePaginationConfig -from synapse.storage.transactions import DestinationsTable from synapse.types import UserID OFFLINE = PresenceState.OFFLINE diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 124bc10e0f..763c04d667 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -27,7 +27,6 @@ from ..utils import ( from synapse.api.errors import AuthError from synapse.handlers.typing import TypingNotificationHandler -from synapse.storage.transactions import DestinationsTable from synapse.types import UserID -- cgit 1.4.1 From 8740e4e94a6de62680db2b6c7b9f9bfda244d8ae Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Jan 2016 11:37:17 +0000 Subject: bulk_get_push_rules should handle empty lists --- synapse/storage/push_rule.py | 34 +++++++++++++--------------------- 1 file changed, 13 insertions(+), 21 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 0829262f42..24d137f8ca 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -57,43 +57,35 @@ class PushRuleStore(SQLBaseStore): @defer.inlineCallbacks def bulk_get_push_rules(self, user_ids): + if not user_ids: + defer.returnValue({}) + batch_size = 100 def f(txn, user_ids_to_fetch): sql = ( - "SELECT " + - ",".join("pr."+x for x in PushRuleTable.fields) + - " FROM " + PushRuleTable.table_name + " pr " + - " LEFT JOIN " + PushRuleEnableTable.table_name + " pre " + - " ON pr.user_name = pre.user_name and pr.rule_id = pre.rule_id " + - " WHERE pr.user_name " + + "SELECT pr.*" + " FROM push_rules as pr " + " LEFT JOIN push_rules_enable as pre " 
+ " ON pr.user_name = pre.user_name and pr.rule_id = pre.rule_id " + " WHERE pr.user_name " " IN (" + ",".join("?" for _ in user_ids_to_fetch) + ")" " AND (pre.enabled is null or pre.enabled = 1)" " ORDER BY pr.user_name, pr.priority_class DESC, pr.priority DESC" ) txn.execute(sql, user_ids_to_fetch) - return txn.fetchall() + return self.cursor_to_dict(txn) results = {} - batch_start = 0 - while batch_start < len(user_ids): - batch_end = min(len(user_ids), batch_size) - batch_user_ids = user_ids[batch_start:batch_end] - batch_start = batch_end - + chunks = [user_ids[i:i+batch_size] for i in xrange(0, len(user_ids), batch_size)] + for batch_user_ids in chunks: rows = yield self.runInteraction( "bulk_get_push_rules", f, batch_user_ids ) - for r in rows: - rawdict = { - PushRuleTable.fields[i]: r[i] for i in range(len(r)) - } - - if rawdict['user_name'] not in results: - results[rawdict['user_name']] = [] - results[rawdict['user_name']].append(rawdict) + for row in rows: + results.setdefault(row['user_name'], []).append(row) defer.returnValue(results) @defer.inlineCallbacks -- cgit 1.4.1 From 9c1f853d58440d1f924fa55bc242b248c410dd7c Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 13 Jan 2016 13:08:59 +0000 Subject: Rename 'user_name' to 'user_id' in push to make it consistent with the rest of the code --- synapse/push/__init__.py | 36 ++++++++++++------------- synapse/push/baserules.py | 12 ++++----- synapse/push/httppusher.py | 6 ++--- synapse/push/push_rule_evaluator.py | 24 ++++++++--------- synapse/push/pusherpool.py | 30 ++++++++++----------- synapse/rest/client/v1/push_rule.py | 10 +++---- synapse/rest/client/v1/pusher.py | 4 +-- synapse/storage/push_rule.py | 54 ++++++++++++++++++------------------- synapse/storage/pusher.py | 22 +++++++-------- 9 files changed, 99 insertions(+), 99 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index c5ddfb564c..a5dc84160c 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -35,7 +35,7 @@ class Pusher(object): MAX_BACKOFF = 60 * 60 * 1000 GIVE_UP_AFTER = 24 * 60 * 60 * 1000 - def __init__(self, _hs, profile_tag, user_name, app_id, + def __init__(self, _hs, profile_tag, user_id, app_id, app_display_name, device_display_name, pushkey, pushkey_ts, data, last_token, last_success, failing_since): self.hs = _hs @@ -43,7 +43,7 @@ class Pusher(object): self.store = self.hs.get_datastore() self.clock = self.hs.get_clock() self.profile_tag = profile_tag - self.user_name = user_name + self.user_id = user_id self.app_id = app_id self.app_display_name = app_display_name self.device_display_name = device_display_name @@ -92,15 +92,15 @@ class Pusher(object): # we fail to dispatch the push) config = PaginationConfig(from_token=None, limit='1') chunk = yield self.evStreamHandler.get_stream( - self.user_name, config, timeout=0, affect_presence=False, + self.user_id, config, timeout=0, affect_presence=False, only_room_events=True ) self.last_token = chunk['end'] self.store.update_pusher_last_token( - self.app_id, self.pushkey, self.user_name, self.last_token + self.app_id, self.pushkey, self.user_id, self.last_token ) logger.info("Pusher %s for user %s starting from token %s", - self.pushkey, self.user_name, self.last_token) + self.pushkey, self.user_id, self.last_token) wait = 0 while self.alive: @@ -125,7 +125,7 @@ class Pusher(object): config = PaginationConfig(from_token=from_tok, limit='1') timeout = (300 + random.randint(-60, 60)) * 1000 chunk = yield 
self.evStreamHandler.get_stream( - self.user_name, config, timeout=timeout, affect_presence=False, + self.user_id, config, timeout=timeout, affect_presence=False, only_room_events=True ) @@ -142,7 +142,7 @@ class Pusher(object): yield self.store.update_pusher_last_token( self.app_id, self.pushkey, - self.user_name, + self.user_id, self.last_token ) return @@ -153,8 +153,8 @@ class Pusher(object): processed = False rule_evaluator = yield \ - push_rule_evaluator.evaluator_for_user_name_and_profile_tag( - self.user_name, self.profile_tag, single_event['room_id'], self.store + push_rule_evaluator.evaluator_for_user_id_and_profile_tag( + self.user_id, self.profile_tag, single_event['room_id'], self.store ) actions = yield rule_evaluator.actions_for_event(single_event) @@ -179,7 +179,7 @@ class Pusher(object): pk ) yield self.hs.get_pusherpool().remove_pusher( - self.app_id, pk, self.user_name + self.app_id, pk, self.user_id ) else: processed = True @@ -193,7 +193,7 @@ class Pusher(object): yield self.store.update_pusher_last_token_and_success( self.app_id, self.pushkey, - self.user_name, + self.user_id, self.last_token, self.clock.time_msec() ) @@ -202,7 +202,7 @@ class Pusher(object): yield self.store.update_pusher_failing_since( self.app_id, self.pushkey, - self.user_name, + self.user_id, self.failing_since) else: if not self.failing_since: @@ -210,7 +210,7 @@ class Pusher(object): yield self.store.update_pusher_failing_since( self.app_id, self.pushkey, - self.user_name, + self.user_id, self.failing_since ) @@ -222,13 +222,13 @@ class Pusher(object): # of old notifications. logger.warn("Giving up on a notification to user %s, " "pushkey %s", - self.user_name, self.pushkey) + self.user_id, self.pushkey) self.backoff_delay = Pusher.INITIAL_BACKOFF self.last_token = chunk['end'] yield self.store.update_pusher_last_token( self.app_id, self.pushkey, - self.user_name, + self.user_id, self.last_token ) @@ -236,14 +236,14 @@ class Pusher(object): yield self.store.update_pusher_failing_since( self.app_id, self.pushkey, - self.user_name, + self.user_id, self.failing_since ) else: logger.warn("Failed to dispatch push for user %s " "(failing for %dms)." 
"Trying again in %dms", - self.user_name, + self.user_id, self.clock.time_msec() - self.failing_since, self.backoff_delay) yield synapse.util.async.sleep(self.backoff_delay / 1000.0) @@ -280,7 +280,7 @@ class Pusher(object): if last_active > self.last_last_active_time: self.last_last_active_time = last_active if self.has_unread: - logger.info("Resetting badge count for %s", self.user_name) + logger.info("Resetting badge count for %s", self.user_id) self.reset_badge_count() self.has_unread = False diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 57de0e34b4..8bac7fd6af 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -15,27 +15,27 @@ from synapse.push.rulekinds import PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP -def list_with_base_rules(rawrules, user_name): +def list_with_base_rules(rawrules, user_id): ruleslist = [] # shove the server default rules for each kind onto the end of each current_prio_class = PRIORITY_CLASS_INVERSE_MAP.keys()[-1] ruleslist.extend(make_base_prepend_rules( - user_name, PRIORITY_CLASS_INVERSE_MAP[current_prio_class] + user_id, PRIORITY_CLASS_INVERSE_MAP[current_prio_class] )) for r in rawrules: if r['priority_class'] < current_prio_class: while r['priority_class'] < current_prio_class: ruleslist.extend(make_base_append_rules( - user_name, + user_id, PRIORITY_CLASS_INVERSE_MAP[current_prio_class] )) current_prio_class -= 1 if current_prio_class > 0: ruleslist.extend(make_base_prepend_rules( - user_name, + user_id, PRIORITY_CLASS_INVERSE_MAP[current_prio_class] )) @@ -43,13 +43,13 @@ def list_with_base_rules(rawrules, user_name): while current_prio_class > 0: ruleslist.extend(make_base_append_rules( - user_name, + user_id, PRIORITY_CLASS_INVERSE_MAP[current_prio_class] )) current_prio_class -= 1 if current_prio_class > 0: ruleslist.extend(make_base_prepend_rules( - user_name, + user_id, PRIORITY_CLASS_INVERSE_MAP[current_prio_class] )) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 7866db6a24..28f1fab0e4 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -23,13 +23,13 @@ logger = logging.getLogger(__name__) class HttpPusher(Pusher): - def __init__(self, _hs, profile_tag, user_name, app_id, + def __init__(self, _hs, profile_tag, user_id, app_id, app_display_name, device_display_name, pushkey, pushkey_ts, data, last_token, last_success, failing_since): super(HttpPusher, self).__init__( _hs, profile_tag, - user_name, + user_id, app_id, app_display_name, device_display_name, @@ -87,7 +87,7 @@ class HttpPusher(Pusher): } if event['type'] == 'm.room.member': d['notification']['membership'] = event['content']['membership'] - d['notification']['user_is_target'] = event['state_key'] == self.user_name + d['notification']['user_is_target'] = event['state_key'] == self.user_id if 'content' in event: d['notification']['content'] = event['content'] diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 705ab8c967..b0283743a2 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -27,17 +27,17 @@ logger = logging.getLogger(__name__) @defer.inlineCallbacks -def evaluator_for_user_name_and_profile_tag(user_name, profile_tag, room_id, store): - rawrules = yield store.get_push_rules_for_user(user_name) - enabled_map = yield store.get_push_rules_enabled_for_user(user_name) +def evaluator_for_user_id_and_profile_tag(user_id, profile_tag, room_id, store): + rawrules = yield 
store.get_push_rules_for_user(user_id) + enabled_map = yield store.get_push_rules_enabled_for_user(user_id) our_member_event = yield store.get_current_state( room_id=room_id, event_type='m.room.member', - state_key=user_name, + state_key=user_id, ) defer.returnValue(PushRuleEvaluator( - user_name, profile_tag, rawrules, enabled_map, + user_id, profile_tag, rawrules, enabled_map, room_id, our_member_event, store )) @@ -46,9 +46,9 @@ class PushRuleEvaluator: DEFAULT_ACTIONS = [] INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$") - def __init__(self, user_name, profile_tag, raw_rules, enabled_map, room_id, + def __init__(self, user_id, profile_tag, raw_rules, enabled_map, room_id, our_member_event, store): - self.user_name = user_name + self.user_id = user_id self.profile_tag = profile_tag self.room_id = room_id self.our_member_event = our_member_event @@ -61,7 +61,7 @@ class PushRuleEvaluator: rule['actions'] = json.loads(raw_rule['actions']) rules.append(rule) - user = UserID.from_string(self.user_name) + user = UserID.from_string(self.user_id) self.rules = baserules.list_with_base_rules(rules, user) self.enabled_map = enabled_map @@ -83,7 +83,7 @@ class PushRuleEvaluator: has configured both globally and per-room when we have the ability to do such things. """ - if ev['user_id'] == self.user_name: + if ev['user_id'] == self.user_id: # let's assume you probably know about messages you sent yourself defer.returnValue([]) @@ -124,13 +124,13 @@ class PushRuleEvaluator: if len(actions) == 0: logger.warn( "Ignoring rule id %s with no actions for user %s", - r['rule_id'], self.user_name + r['rule_id'], self.user_id ) continue if matches: logger.info( "%s matches for user %s, event %s", - r['rule_id'], self.user_name, ev['event_id'] + r['rule_id'], self.user_id, ev['event_id'] ) # filter out dont_notify as we treat an empty actions list @@ -141,7 +141,7 @@ class PushRuleEvaluator: logger.info( "No rules match for user %s, event %s", - self.user_name, ev['event_id'] + self.user_id, ev['event_id'] ) defer.returnValue(PushRuleEvaluator.DEFAULT_ACTIONS) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 4208e5c76c..12c4af14bd 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -38,12 +38,12 @@ class PusherPool: @defer.inlineCallbacks def user_presence_changed(self, user, state): - user_name = user.to_string() + user_id = user.to_string() # until we have read receipts, pushers use this to reset a user's # badge counters to zero for p in self.pushers.values(): - if p.user_name == user_name: + if p.user_id == user_id: yield p.presence_changed(state) @defer.inlineCallbacks @@ -52,14 +52,14 @@ class PusherPool: self._start_pushers(pushers) @defer.inlineCallbacks - def add_pusher(self, user_name, access_token, profile_tag, kind, app_id, + def add_pusher(self, user_id, access_token, profile_tag, kind, app_id, app_display_name, device_display_name, pushkey, lang, data): # we try to create the pusher just to validate the config: it # will then get pulled out of the database, # recreated, added and started: this means we have only one # code path adding pushers. 
self._create_pusher({ - "user_name": user_name, + "user_name": user_id, "kind": kind, "profile_tag": profile_tag, "app_id": app_id, @@ -74,7 +74,7 @@ class PusherPool: "failing_since": None }) yield self._add_pusher_to_store( - user_name, access_token, profile_tag, kind, app_id, + user_id, access_token, profile_tag, kind, app_id, app_display_name, device_display_name, pushkey, lang, data ) @@ -109,11 +109,11 @@ class PusherPool: self.remove_pusher(p['app_id'], p['pushkey'], p['user_name']) @defer.inlineCallbacks - def _add_pusher_to_store(self, user_name, access_token, profile_tag, kind, + def _add_pusher_to_store(self, user_id, access_token, profile_tag, kind, app_id, app_display_name, device_display_name, pushkey, lang, data): yield self.store.add_pusher( - user_name=user_name, + user_id=user_id, access_token=access_token, profile_tag=profile_tag, kind=kind, @@ -125,14 +125,14 @@ class PusherPool: lang=lang, data=data, ) - self._refresh_pusher(app_id, pushkey, user_name) + self._refresh_pusher(app_id, pushkey, user_id) def _create_pusher(self, pusherdict): if pusherdict['kind'] == 'http': return HttpPusher( self.hs, profile_tag=pusherdict['profile_tag'], - user_name=pusherdict['user_name'], + user_id=pusherdict['user_name'], app_id=pusherdict['app_id'], app_display_name=pusherdict['app_display_name'], device_display_name=pusherdict['device_display_name'], @@ -150,14 +150,14 @@ class PusherPool: ) @defer.inlineCallbacks - def _refresh_pusher(self, app_id, pushkey, user_name): + def _refresh_pusher(self, app_id, pushkey, user_id): resultlist = yield self.store.get_pushers_by_app_id_and_pushkey( app_id, pushkey ) p = None for r in resultlist: - if r['user_name'] == user_name: + if r['user_name'] == user_id: p = r if p: @@ -186,12 +186,12 @@ class PusherPool: logger.info("Started pushers") @defer.inlineCallbacks - def remove_pusher(self, app_id, pushkey, user_name): - fullid = "%s:%s:%s" % (app_id, pushkey, user_name) + def remove_pusher(self, app_id, pushkey, user_id): + fullid = "%s:%s:%s" % (app_id, pushkey, user_id) if fullid in self.pushers: logger.info("Stopping pusher %s", fullid) self.pushers[fullid].stop() del self.pushers[fullid] - yield self.store.delete_pusher_by_app_id_pushkey_user_name( - app_id, pushkey, user_name + yield self.store.delete_pusher_by_app_id_pushkey_user_id( + app_id, pushkey, user_id ) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index c0a21c0c12..df53824d2d 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -73,7 +73,7 @@ class PushRuleRestServlet(ClientV1RestServlet): try: yield self.hs.get_datastore().add_push_rule( - user_name=requester.user.to_string(), + user_id=requester.user.to_string(), rule_id=_namespaced_rule_id_from_spec(spec), priority_class=priority_class, conditions=conditions, @@ -206,7 +206,7 @@ class PushRuleRestServlet(ClientV1RestServlet): def on_OPTIONS(self, _): return 200, {} - def set_rule_attr(self, user_name, spec, val): + def set_rule_attr(self, user_id, spec, val): if spec['attr'] == 'enabled': if isinstance(val, dict) and "enabled" in val: val = val["enabled"] @@ -217,15 +217,15 @@ class PushRuleRestServlet(ClientV1RestServlet): raise SynapseError(400, "Value for 'enabled' must be boolean") namespaced_rule_id = _namespaced_rule_id_from_spec(spec) self.hs.get_datastore().set_push_rule_enabled( - user_name, namespaced_rule_id, val + user_id, namespaced_rule_id, val ) else: raise UnrecognizedRequestError() - def get_rule_attr(self, user_name, 
namespaced_rule_id, attr): + def get_rule_attr(self, user_id, namespaced_rule_id, attr): if attr == 'enabled': return self.hs.get_datastore().get_push_rule_enabled_by_user_rule_id( - user_name, namespaced_rule_id + user_id, namespaced_rule_id ) else: raise UnrecognizedRequestError() diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index b162b210bc..e218ed215c 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -41,7 +41,7 @@ class PusherRestServlet(ClientV1RestServlet): and 'kind' in content and content['kind'] is None): yield pusher_pool.remove_pusher( - content['app_id'], content['pushkey'], user_name=user.to_string() + content['app_id'], content['pushkey'], user_id=user.to_string() ) defer.returnValue((200, {})) @@ -71,7 +71,7 @@ class PusherRestServlet(ClientV1RestServlet): try: yield pusher_pool.add_pusher( - user_name=user.to_string(), + user_id=user.to_string(), access_token=requester.access_token_id, profile_tag=content['profile_tag'], kind=content['kind'], diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 448009b4b6..2adfefd994 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -25,11 +25,11 @@ logger = logging.getLogger(__name__) class PushRuleStore(SQLBaseStore): @cachedInlineCallbacks() - def get_push_rules_for_user(self, user_name): + def get_push_rules_for_user(self, user_id): rows = yield self._simple_select_list( table="push_rules", keyvalues={ - "user_name": user_name, + "user_name": user_id, }, retcols=( "user_name", "rule_id", "priority_class", "priority", @@ -45,11 +45,11 @@ class PushRuleStore(SQLBaseStore): defer.returnValue(rows) @cachedInlineCallbacks() - def get_push_rules_enabled_for_user(self, user_name): + def get_push_rules_enabled_for_user(self, user_id): results = yield self._simple_select_list( table="push_rules_enable", keyvalues={ - 'user_name': user_name + 'user_name': user_id }, retcols=( "user_name", "rule_id", "enabled", @@ -122,7 +122,7 @@ class PushRuleStore(SQLBaseStore): ) defer.returnValue(ret) - def _add_push_rule_relative_txn(self, txn, user_name, **kwargs): + def _add_push_rule_relative_txn(self, txn, user_id, **kwargs): after = kwargs.pop("after", None) relative_to_rule = kwargs.pop("before", after) @@ -130,7 +130,7 @@ class PushRuleStore(SQLBaseStore): txn, table="push_rules", keyvalues={ - "user_name": user_name, + "user_name": user_id, "rule_id": relative_to_rule, }, retcols=["priority_class", "priority"], @@ -154,7 +154,7 @@ class PushRuleStore(SQLBaseStore): new_rule.pop("before", None) new_rule.pop("after", None) new_rule['priority_class'] = priority_class - new_rule['user_name'] = user_name + new_rule['user_name'] = user_id new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn) # check if the priority before/after is free @@ -170,7 +170,7 @@ class PushRuleStore(SQLBaseStore): "SELECT COUNT(*) FROM push_rules" " WHERE user_name = ? AND priority_class = ? AND priority = ?" ) - txn.execute(sql, (user_name, priority_class, new_rule_priority)) + txn.execute(sql, (user_id, priority_class, new_rule_priority)) res = txn.fetchall() num_conflicting = res[0][0] @@ -187,14 +187,14 @@ class PushRuleStore(SQLBaseStore): else: sql += ">= ?" 
- txn.execute(sql, (user_name, priority_class, new_rule_priority)) + txn.execute(sql, (user_id, priority_class, new_rule_priority)) txn.call_after( - self.get_push_rules_for_user.invalidate, (user_name,) + self.get_push_rules_for_user.invalidate, (user_id,) ) txn.call_after( - self.get_push_rules_enabled_for_user.invalidate, (user_name,) + self.get_push_rules_enabled_for_user.invalidate, (user_id,) ) self._simple_insert_txn( @@ -203,14 +203,14 @@ class PushRuleStore(SQLBaseStore): values=new_rule, ) - def _add_push_rule_highest_priority_txn(self, txn, user_name, + def _add_push_rule_highest_priority_txn(self, txn, user_id, priority_class, **kwargs): # find the highest priority rule in that class sql = ( "SELECT COUNT(*), MAX(priority) FROM push_rules" " WHERE user_name = ? and priority_class = ?" ) - txn.execute(sql, (user_name, priority_class)) + txn.execute(sql, (user_id, priority_class)) res = txn.fetchall() (how_many, highest_prio) = res[0] @@ -221,15 +221,15 @@ class PushRuleStore(SQLBaseStore): # and insert the new rule new_rule = kwargs new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn) - new_rule['user_name'] = user_name + new_rule['user_name'] = user_id new_rule['priority_class'] = priority_class new_rule['priority'] = new_prio txn.call_after( - self.get_push_rules_for_user.invalidate, (user_name,) + self.get_push_rules_for_user.invalidate, (user_id,) ) txn.call_after( - self.get_push_rules_enabled_for_user.invalidate, (user_name,) + self.get_push_rules_enabled_for_user.invalidate, (user_id,) ) self._simple_insert_txn( @@ -239,48 +239,48 @@ class PushRuleStore(SQLBaseStore): ) @defer.inlineCallbacks - def delete_push_rule(self, user_name, rule_id): + def delete_push_rule(self, user_id, rule_id): """ Delete a push rule. Args specify the row to be deleted and can be any of the columns in the push_rule table, but below are the standard ones Args: - user_name (str): The matrix ID of the push rule owner + user_id (str): The matrix ID of the push rule owner rule_id (str): The rule_id of the rule to be deleted """ yield self._simple_delete_one( "push_rules", - {'user_name': user_name, 'rule_id': rule_id}, + {'user_name': user_id, 'rule_id': rule_id}, desc="delete_push_rule", ) - self.get_push_rules_for_user.invalidate((user_name,)) - self.get_push_rules_enabled_for_user.invalidate((user_name,)) + self.get_push_rules_for_user.invalidate((user_id,)) + self.get_push_rules_enabled_for_user.invalidate((user_id,)) @defer.inlineCallbacks - def set_push_rule_enabled(self, user_name, rule_id, enabled): + def set_push_rule_enabled(self, user_id, rule_id, enabled): ret = yield self.runInteraction( "_set_push_rule_enabled_txn", self._set_push_rule_enabled_txn, - user_name, rule_id, enabled + user_id, rule_id, enabled ) defer.returnValue(ret) - def _set_push_rule_enabled_txn(self, txn, user_name, rule_id, enabled): + def _set_push_rule_enabled_txn(self, txn, user_id, rule_id, enabled): new_id = self._push_rules_enable_id_gen.get_next_txn(txn) self._simple_upsert_txn( txn, "push_rules_enable", - {'user_name': user_name, 'rule_id': rule_id}, + {'user_name': user_id, 'rule_id': rule_id}, {'enabled': 1 if enabled else 0}, {'id': new_id}, ) txn.call_after( - self.get_push_rules_for_user.invalidate, (user_name,) + self.get_push_rules_for_user.invalidate, (user_id,) ) txn.call_after( - self.get_push_rules_enabled_for_user.invalidate, (user_name,) + self.get_push_rules_enabled_for_user.invalidate, (user_id,) ) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 
2b90d6c622..8ec706178a 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -80,7 +80,7 @@ class PusherStore(SQLBaseStore): defer.returnValue(rows) @defer.inlineCallbacks - def add_pusher(self, user_name, access_token, profile_tag, kind, app_id, + def add_pusher(self, user_id, access_token, profile_tag, kind, app_id, app_display_name, device_display_name, pushkey, pushkey_ts, lang, data): try: @@ -90,7 +90,7 @@ class PusherStore(SQLBaseStore): dict( app_id=app_id, pushkey=pushkey, - user_name=user_name, + user_name=user_id, ), dict( access_token=access_token, @@ -112,38 +112,38 @@ class PusherStore(SQLBaseStore): raise StoreError(500, "Problem creating pusher.") @defer.inlineCallbacks - def delete_pusher_by_app_id_pushkey_user_name(self, app_id, pushkey, user_name): + def delete_pusher_by_app_id_pushkey_user_id(self, app_id, pushkey, user_id): yield self._simple_delete_one( "pushers", - {"app_id": app_id, "pushkey": pushkey, 'user_name': user_name}, - desc="delete_pusher_by_app_id_pushkey_user_name", + {"app_id": app_id, "pushkey": pushkey, 'user_name': user_id}, + desc="delete_pusher_by_app_id_pushkey_user_id", ) @defer.inlineCallbacks - def update_pusher_last_token(self, app_id, pushkey, user_name, last_token): + def update_pusher_last_token(self, app_id, pushkey, user_id, last_token): yield self._simple_update_one( "pushers", - {'app_id': app_id, 'pushkey': pushkey, 'user_name': user_name}, + {'app_id': app_id, 'pushkey': pushkey, 'user_name': user_id}, {'last_token': last_token}, desc="update_pusher_last_token", ) @defer.inlineCallbacks - def update_pusher_last_token_and_success(self, app_id, pushkey, user_name, + def update_pusher_last_token_and_success(self, app_id, pushkey, user_id, last_token, last_success): yield self._simple_update_one( "pushers", - {'app_id': app_id, 'pushkey': pushkey, 'user_name': user_name}, + {'app_id': app_id, 'pushkey': pushkey, 'user_name': user_id}, {'last_token': last_token, 'last_success': last_success}, desc="update_pusher_last_token_and_success", ) @defer.inlineCallbacks - def update_pusher_failing_since(self, app_id, pushkey, user_name, + def update_pusher_failing_since(self, app_id, pushkey, user_id, failing_since): yield self._simple_update_one( "pushers", - {'app_id': app_id, 'pushkey': pushkey, 'user_name': user_name}, + {'app_id': app_id, 'pushkey': pushkey, 'user_name': user_id}, {'failing_since': failing_since}, desc="update_pusher_failing_since", ) -- cgit 1.4.1 From 244b356a37ada45d2a4e6aec1f08986aaa7eaaa1 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Wed, 13 Jan 2016 17:03:58 +0000 Subject: Delete unused code --- synapse/storage/appservice.py | 59 ------------------------------------------- 1 file changed, 59 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index eab58d9ce9..1a2b4678a2 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -15,7 +15,6 @@ import logging import urllib import yaml -from simplejson import JSONDecodeError import simplejson as json from twisted.internet import defer @@ -144,64 +143,6 @@ class ApplicationServiceStore(SQLBaseStore): return rooms_for_user_matching_user_id - def _parse_services_dict(self, results): - # SQL results in the form: - # [ - # { - # 'regex': "something", - # 'url': "something", - # 'namespace': enum, - # 'as_id': 0, - # 'token': "something", - # 'hs_token': "otherthing", - # 'id': 0 - # } - # ] - services = {} - for res in results: - as_token = res["token"] - if 
as_token is None: - continue - if as_token not in services: - # add the service - services[as_token] = { - "id": res["id"], - "url": res["url"], - "token": as_token, - "hs_token": res["hs_token"], - "sender": res["sender"], - "namespaces": { - ApplicationService.NS_USERS: [], - ApplicationService.NS_ALIASES: [], - ApplicationService.NS_ROOMS: [] - } - } - # add the namespace regex if one exists - ns_int = res["namespace"] - if ns_int is None: - continue - try: - services[as_token]["namespaces"][ - ApplicationService.NS_LIST[ns_int]].append( - json.loads(res["regex"]) - ) - except IndexError: - logger.error("Bad namespace enum '%s'. %s", ns_int, res) - except JSONDecodeError: - logger.error("Bad regex object '%s'", res["regex"]) - - service_list = [] - for service in services.values(): - service_list.append(ApplicationService( - token=service["token"], - url=service["url"], - namespaces=service["namespaces"], - hs_token=service["hs_token"], - sender=service["sender"], - id=service["id"] - )) - return service_list - def _load_appservice(self, as_info): required_string_fields = [ "url", "as_token", "hs_token", "sender_localpart" -- cgit 1.4.1 From f6fcff360250eb362986835c195a96825567a03d Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Wed, 13 Jan 2016 17:09:24 +0000 Subject: Don't start server if ASes are invalidly configured --- synapse/storage/appservice.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index eab58d9ce9..25a6f14f40 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -265,6 +265,7 @@ class ApplicationServiceStore(SQLBaseStore): except Exception as e: logger.error("Failed to load appservice from '%s'", config_file) logger.exception(e) + raise class ApplicationServiceTransactionStore(SQLBaseStore): -- cgit 1.4.1 From 2680043bc6a64053b93b9bab144aeb5f45007976 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Thu, 14 Jan 2016 14:34:01 +0000 Subject: Require ID and as_token be unique for ASs Defaults ID to as_token if not specified. This will change when IDs are fully supported. 
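As an illustrative sketch (not taken from the patch itself; file names and
values are made up), the new loader behaviour amounts to:

    # Two hypothetical registration files, as yaml.load would return them;
    # both (wrongly) share an as_token, and the first omits "id".
    as_a = {"url": "https://a.example", "as_token": "secret",
            "hs_token": "hs_a", "sender_localpart": "bot_a", "namespaces": {}}
    as_b = {"url": "https://b.example", "as_token": "secret",
            "hs_token": "hs_b", "sender_localpart": "bot_b", "namespaces": {},
            "id": "bridge_b"}

    seen_as_tokens = {}
    for filename, as_info in (("a.yaml", as_a), ("b.yaml", as_b)):
        if as_info["as_token"] in seen_as_tokens:
            # the real loader raises ConfigError here and refuses to start
            raise ValueError("Cannot reuse as_token across application services")
        seen_as_tokens[as_info["as_token"]] = filename
        # "id" defaults to the as_token when the file omits it
        as_id = as_info["id"] if "id" in as_info else as_info["as_token"]
        print "loaded appservice %s from %s" % (as_id, filename)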
--- synapse/storage/appservice.py | 26 +++++++++- tests/appservice/test_appservice.py | 1 + tests/storage/test_appservice.py | 101 ++++++++++++++++++++++++++++++------ 3 files changed, 111 insertions(+), 17 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index f4bc457eca..b5aa55c0a3 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -20,6 +20,7 @@ from twisted.internet import defer from synapse.api.constants import Membership from synapse.appservice import ApplicationService, AppServiceTransaction +from synapse.config._base import ConfigError from synapse.storage.roommember import RoomsForUser from synapse.types import UserID from ._base import SQLBaseStore @@ -145,6 +146,7 @@ class ApplicationServiceStore(SQLBaseStore): def _load_appservice(self, as_info): required_string_fields = [ + # TODO: Add id here when it's stable to release "url", "as_token", "hs_token", "sender_localpart" ] for field in required_string_fields: @@ -186,7 +188,7 @@ class ApplicationServiceStore(SQLBaseStore): namespaces=as_info["namespaces"], hs_token=as_info["hs_token"], sender=user_id, - id=as_info["as_token"] # the token is the only unique thing here + id=as_info["id"] if "id" in as_info else as_info["as_token"], ) def _populate_appservice_cache(self, config_files): @@ -197,10 +199,32 @@ class ApplicationServiceStore(SQLBaseStore): ) return + # Dicts of value -> filename + seen_as_tokens = {} + seen_ids = {} + for config_file in config_files: try: with open(config_file, 'r') as f: appservice = self._load_appservice(yaml.load(f)) + if appservice.id in seen_ids: + raise ConfigError( + "Cannot reuse ID across application services: " + "%s (files: %s, %s)" % ( + appservice.id, config_file, seen_ids[appservice.id], + ) + ) + seen_ids[appservice.id] = config_file + if appservice.token in seen_as_tokens: + raise ConfigError( + "Cannot reuse as_token across application services: " + "%s (files: %s, %s)" % ( + appservice.token, + config_file, + seen_as_tokens[appservice.token], + ) + ) + seen_as_tokens[appservice.token] = config_file logger.info("Loaded application service: %s", appservice) self.services_cache.append(appservice) except Exception as e: diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index 191c420c4d..ef48bbc296 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -29,6 +29,7 @@ class ApplicationServiceTestCase(unittest.TestCase): def setUp(self): self.service = ApplicationService( + id="unique_identifier", url="some_url", token="some_token", namespaces={ diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index a5a464640f..5abecdf6e0 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -12,12 +12,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
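+# tempfile gives each config test below a throwaway path for its registration yaml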
+import tempfile +from synapse.config._base import ConfigError from tests import unittest from twisted.internet import defer from tests.utils import setup_test_homeserver from synapse.appservice import ApplicationService, ApplicationServiceState -from synapse.server import HomeServer from synapse.storage.appservice import ( ApplicationServiceStore, ApplicationServiceTransactionStore ) @@ -26,7 +27,6 @@ import json import os import yaml from mock import Mock -from tests.utils import SQLiteMemoryDbPool, MockClock class ApplicationServiceStoreTestCase(unittest.TestCase): @@ -41,9 +41,16 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): self.as_token = "token1" self.as_url = "some_url" - self._add_appservice(self.as_token, self.as_url, "some_hs_token", "bob") - self._add_appservice("token2", "some_url", "some_hs_token", "bob") - self._add_appservice("token3", "some_url", "some_hs_token", "bob") + self.as_id = "as1" + self._add_appservice( + self.as_token, + self.as_id, + self.as_url, + "some_hs_token", + "bob" + ) + self._add_appservice("token2", "as2", "some_url", "some_hs_token", "bob") + self._add_appservice("token3", "as3", "some_url", "some_hs_token", "bob") # must be done after inserts self.store = ApplicationServiceStore(hs) @@ -55,9 +62,9 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): except: pass - def _add_appservice(self, as_token, url, hs_token, sender): + def _add_appservice(self, as_token, id, url, hs_token, sender): as_yaml = dict(url=url, as_token=as_token, hs_token=hs_token, - sender_localpart=sender, namespaces={}) + id=id, sender_localpart=sender, namespaces={}) # use the token as the filename with open(as_token, 'w') as outfile: outfile.write(yaml.dump(as_yaml)) @@ -74,6 +81,7 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): self.as_token ) self.assertEquals(stored_service.token, self.as_token) + self.assertEquals(stored_service.id, self.as_id) self.assertEquals(stored_service.url, self.as_url) self.assertEquals( stored_service.namespaces[ApplicationService.NS_ALIASES], @@ -110,34 +118,34 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): { "token": "token1", "url": "https://matrix-as.org", - "id": "token1" + "id": "id_1" }, { "token": "alpha_tok", "url": "https://alpha.com", - "id": "alpha_tok" + "id": "id_alpha" }, { "token": "beta_tok", "url": "https://beta.com", - "id": "beta_tok" + "id": "id_beta" }, { - "token": "delta_tok", - "url": "https://delta.com", - "id": "delta_tok" + "token": "gamma_tok", + "url": "https://gamma.com", + "id": "id_gamma" }, ] for s in self.as_list: - yield self._add_service(s["url"], s["token"]) + yield self._add_service(s["url"], s["token"], s["id"]) self.as_yaml_files = [] self.store = TestTransactionStore(hs) - def _add_service(self, url, as_token): + def _add_service(self, url, as_token, id): as_yaml = dict(url=url, as_token=as_token, hs_token="something", - sender_localpart="a_sender", namespaces={}) + id=id, sender_localpart="a_sender", namespaces={}) # use the token as the filename with open(as_token, 'w') as outfile: outfile.write(yaml.dump(as_yaml)) @@ -405,3 +413,64 @@ class TestTransactionStore(ApplicationServiceTransactionStore, def __init__(self, hs): super(TestTransactionStore, self).__init__(hs) + + +class ApplicationServiceStoreConfigTestCase(unittest.TestCase): + + def _write_config(self, suffix, **kwargs): + vals = { + "id": "id" + suffix, + "url": "url" + suffix, + "as_token": "as_token" + suffix, + "hs_token": "hs_token" + suffix, + "sender_localpart": 
"sender_localpart" + suffix, + "namespaces": {}, + } + vals.update(kwargs) + + _, path = tempfile.mkstemp(prefix="as_config") + with open(path, "w") as f: + f.write(yaml.dump(vals)) + return path + + @defer.inlineCallbacks + def test_unique_works(self): + f1 = self._write_config(suffix="1") + f2 = self._write_config(suffix="2") + + config = Mock(app_service_config_files=[f1, f2]) + hs = yield setup_test_homeserver(config=config) + + ApplicationServiceStore(hs) + + @defer.inlineCallbacks + def test_duplicate_ids(self): + f1 = self._write_config(id="id", suffix="1") + f2 = self._write_config(id="id", suffix="2") + + config = Mock(app_service_config_files=[f1, f2]) + hs = yield setup_test_homeserver(config=config) + + with self.assertRaises(ConfigError) as cm: + ApplicationServiceStore(hs) + + e = cm.exception + self.assertIn(f1, e.message) + self.assertIn(f2, e.message) + self.assertIn("id", e.message) + + @defer.inlineCallbacks + def test_duplicate_as_tokens(self): + f1 = self._write_config(as_token="as_token", suffix="1") + f2 = self._write_config(as_token="as_token", suffix="2") + + config = Mock(app_service_config_files=[f1, f2]) + hs = yield setup_test_homeserver(config=config) + + with self.assertRaises(ConfigError) as cm: + ApplicationServiceStore(hs) + + e = cm.exception + self.assertIn(f1, e.message) + self.assertIn(f2, e.message) + self.assertIn("as_token", e.message) -- cgit 1.4.1 From cc66a9a5e3fc954b0da48ba891e9f77be31aa832 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 18 Jan 2016 10:45:09 +0000 Subject: Allow filtering events for multiple users at once --- synapse/handlers/_base.py | 93 +++++++++++++++++++++++++------------------ synapse/storage/roommember.py | 13 ++++++ 2 files changed, 67 insertions(+), 39 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index bb2c6733d5..2d1167296a 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -53,16 +53,54 @@ class BaseHandler(object): self.event_builder_factory = hs.get_event_builder_factory() @defer.inlineCallbacks - def _filter_events_for_client(self, user_id, events, is_guest=False): - # Assumes that user has at some point joined the room if not is_guest. + def _filter_events_for_clients(self, users, events): + """ Returns dict of user_id -> list of events that user is allowed to + see. 
+ """ + event_id_to_state = yield self.store.get_state_for_events( + frozenset(e.event_id for e in events), + types=( + (EventTypes.RoomHistoryVisibility, ""), + (EventTypes.Member, None), + ) + ) + + forgotten = yield defer.gatherResults([ + self.store.who_forgot_in_room( + room_id, + ) + for room_id in frozenset(e.room_id for e in events) + ], consumeErrors=True) + + # Set of membership event_ids that have been forgotten + event_id_forgotten = frozenset( + row["event_id"] for rows in forgotten for row in rows + ) + + def allowed(event, user_id, is_guest): + state = event_id_to_state[event.event_id] + + visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None) + if visibility_event: + visibility = visibility_event.content.get("history_visibility", "shared") + else: + visibility = "shared" - def allowed(event, membership, visibility): if visibility == "world_readable": return True if is_guest: return False + membership_event = state.get((EventTypes.Member, user_id), None) + if membership_event: + if membership_event.event_id in event_id_forgotten: + membership = None + else: + membership = membership_event.membership + else: + membership = None + if membership == Membership.JOIN: return True @@ -78,43 +116,20 @@ class BaseHandler(object): return True - event_id_to_state = yield self.store.get_state_for_events( - frozenset(e.event_id for e in events), - types=( - (EventTypes.RoomHistoryVisibility, ""), - (EventTypes.Member, user_id), - ) - ) - - events_to_return = [] - for event in events: - state = event_id_to_state[event.event_id] + defer.returnValue({ + user_id: [ + event + for event in events + if allowed(event, user_id, is_guest) + ] + for user_id, is_guest in users + }) - membership_event = state.get((EventTypes.Member, user_id), None) - if membership_event: - was_forgotten_at_event = yield self.store.was_forgotten_at( - membership_event.state_key, - membership_event.room_id, - membership_event.event_id - ) - if was_forgotten_at_event: - membership = None - else: - membership = membership_event.membership - else: - membership = None - - visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None) - if visibility_event: - visibility = visibility_event.content.get("history_visibility", "shared") - else: - visibility = "shared" - - should_include = allowed(event, membership, visibility) - if should_include: - events_to_return.append(event) - - defer.returnValue(events_to_return) + @defer.inlineCallbacks + def _filter_events_for_client(self, user_id, events, is_guest=False): + # Assumes that user has at some point joined the room if not is_guest. 
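+        # This is now a thin wrapper: run the bulk check above for a single
+        # (user_id, is_guest) pair and pick that user's result back out.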
+ res = yield self._filter_events_for_clients([(user_id, is_guest)], events) + defer.returnValue(res.get(user_id, [])) def ratelimit(self, user_id): time_now = self.clock.time() diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 7d3ce4579d..68ac88905f 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -287,6 +287,7 @@ class RoomMemberStore(SQLBaseStore): txn.execute(sql, (user_id, room_id)) yield self.runInteraction("forget_membership", f) self.was_forgotten_at.invalidate_all() + self.who_forgot_in_room.invalidate_all() self.did_forget.invalidate((user_id, room_id)) @cachedInlineCallbacks(num_args=2) @@ -336,3 +337,15 @@ class RoomMemberStore(SQLBaseStore): return rows[0][0] forgot = yield self.runInteraction("did_forget_membership_at", f) defer.returnValue(forgot == 1) + + @cached() + def who_forgot_in_room(self, room_id): + return self._simple_select_list( + table="room_memberships", + retcols=("user_id", "event_id"), + keyvalues={ + "room_id": room_id, + "forgotten": 1, + }, + desc="who_forgot" + ) -- cgit 1.4.1 From f59b56450797746230046137b2e2008cb66cb604 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 18 Jan 2016 14:09:47 +0000 Subject: Make notifications go quicker --- synapse/push/bulk_push_rule_evaluator.py | 116 +++++++++------- synapse/push/push_rule_evaluator.py | 226 ++++++++++++++++++++----------- synapse/storage/push_rule.py | 23 +++- synapse/storage/registration.py | 26 +++- 4 files changed, 260 insertions(+), 131 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index ce244fa959..b9f78fd598 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -14,16 +14,16 @@ # limitations under the License. 
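+# ujson is a faster drop-in JSON decoder than simplejson; rule documents are
+# decoded for every user on every event, so parse speed matters on this path.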
import logging -import simplejson as json +import ujson as json from twisted.internet import defer -from synapse.types import UserID - import baserules -from push_rule_evaluator import PushRuleEvaluator +from push_rule_evaluator import PushRuleEvaluatorForEvent + +from synapse.api.constants import EventTypes +from synapse.types import UserID -from synapse.events.utils import serialize_event logger = logging.getLogger(__name__) @@ -35,28 +35,25 @@ def decode_rule_json(rule): @defer.inlineCallbacks -def evaluator_for_room_id(room_id, store): - users = yield store.get_users_in_room(room_id) - rules_by_user = yield store.bulk_get_push_rules(users) +def _get_rules(room_id, user_ids, store): + rules_by_user = yield store.bulk_get_push_rules(user_ids) rules_by_user = { uid: baserules.list_with_base_rules( - [decode_rule_json(rule_list) for rule_list in rules_by_user[uid]] - if uid in rules_by_user else [], + [decode_rule_json(rule_list) for rule_list in rules_by_user.get(uid, [])], UserID.from_string(uid), ) - for uid in users + for uid in user_ids } - member_events = yield store.get_current_state( - room_id=room_id, - event_type='m.room.member', - ) - display_names = {} - for ev in member_events: - if ev.content.get("displayname"): - display_names[ev.state_key] = ev.content.get("displayname") + defer.returnValue(rules_by_user) + + +@defer.inlineCallbacks +def evaluator_for_room_id(room_id, store): + users = yield store.get_users_in_room(room_id) + rules_by_user = yield _get_rules(room_id, users, store) defer.returnValue(BulkPushRuleEvaluator( - room_id, rules_by_user, display_names, users, store + room_id, rules_by_user, users, store )) @@ -69,10 +66,9 @@ class BulkPushRuleEvaluator: the same logic to run the actual rules, but could be optimised further (see https://matrix.org/jira/browse/SYN-562) """ - def __init__(self, room_id, rules_by_user, display_names, users_in_room, store): + def __init__(self, room_id, rules_by_user, users_in_room, store): self.room_id = room_id self.rules_by_user = rules_by_user - self.display_names = display_names self.users_in_room = users_in_room self.store = store @@ -80,15 +76,30 @@ class BulkPushRuleEvaluator: def action_for_event_by_user(self, event, handler): actions_by_user = {} + users_dict = yield self.store.are_guests(self.rules_by_user.keys()) + + filtered_by_user = yield handler._filter_events_for_clients( + users_dict.items(), [event] + ) + + evaluator = PushRuleEvaluatorForEvent.create(event, len(self.users_in_room)) + + condition_cache = {} + + member_state = yield self.store.get_state_for_event( + event.event_id, + ) + + display_names = {} + for ev in member_state.values(): + nm = ev.content.get("displayname", None) + if nm and ev.type == EventTypes.Member: + display_names[ev.state_key] = nm + for uid, rules in self.rules_by_user.items(): - display_name = None - if uid in self.display_names: - display_name = self.display_names[uid] - - is_guest = yield self.store.is_guest(UserID.from_string(uid)) - filtered = yield handler._filter_events_for_client( - uid, [event], is_guest=is_guest - ) + display_name = display_names.get(uid, None) + + filtered = filtered_by_user[uid] if len(filtered) == 0: continue @@ -96,29 +107,32 @@ class BulkPushRuleEvaluator: if 'enabled' in rule and not rule['enabled']: continue - # XXX: profile tags - if BulkPushRuleEvaluator.event_matches_rule( - event, rule, - display_name, len(self.users_in_room), None - ): + matches = _condition_checker( + evaluator, rule['conditions'], display_name, condition_cache + ) + if matches: 
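+                # strip 'dont_notify' so an explicitly-silenced rule yields an
+                # empty action list; the first matching rule still wins via
+                # the break below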
+                actions = [x for x in rule['actions'] if x != 'dont_notify']
-                if len(actions) > 0:
+                if actions:
                     actions_by_user[uid] = actions
                 break
 
     defer.returnValue(actions_by_user)
 
-    @staticmethod
-    def event_matches_rule(event, rule,
-                           display_name, room_member_count, profile_tag):
-        matches = True
-
-        # passing the clock all the way into here is extremely awkward and push
-        # rules do not care about any of the relative timestamps, so we just
-        # pass 0 for the current time.
-        client_event = serialize_event(event, 0)
-
-        for cond in rule['conditions']:
-            matches &= PushRuleEvaluator._event_fulfills_condition(
-                client_event, cond, display_name, room_member_count, profile_tag
-            )
-        return matches
+
+def _condition_checker(evaluator, conditions, display_name, cache):
+    for cond in conditions:
+        _id = cond.get("_id", None)
+        if _id:
+            res = cache.get(_id, None)
+            if res is False:
+                return False
+            elif res is True:
+                continue
+
+        res = evaluator.matches(cond, display_name, None)
+        if _id:
+            # cache a plain bool: matches() may return a regex match object
+            cache[_id] = bool(res)
+
+        if not res:
+            return False
+
+    return True
diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py
index b0283743a2..bbc8308c2d 100644
--- a/synapse/push/push_rule_evaluator.py
+++ b/synapse/push/push_rule_evaluator.py
@@ -15,17 +15,22 @@
 
 from twisted.internet import defer
 
-from synapse.types import UserID
-
 import baserules
 
 import logging
 import simplejson as json
 import re
 
+from synapse.types import UserID
+
 logger = logging.getLogger(__name__)
 
 
+GLOB_REGEX = re.compile(r'\\\[(\\\!|)(.*)\\\]')
+IS_GLOB = re.compile(r'[\?\*\[\]]')
+INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$")
+
+
 @defer.inlineCallbacks
 def evaluator_for_user_id_and_profile_tag(user_id, profile_tag, room_id, store):
     rawrules = yield store.get_push_rules_for_user(user_id)
@@ -42,9 +47,34 @@ def evaluator_for_user_id_and_profile_tag(user_id, profile_tag, room_id, store):
     ))
 
 
+def _room_member_count(ev, condition, room_member_count):
+    if 'is' not in condition:
+        return False
+    m = INEQUALITY_EXPR.match(condition['is'])
+    if not m:
+        return False
+    ineq = m.group(1)
+    rhs = m.group(2)
+    if not rhs.isdigit():
+        return False
+    rhs = int(rhs)
+
+    if ineq == '' or ineq == '==':
+        return room_member_count == rhs
+    elif ineq == '<':
+        return room_member_count < rhs
+    elif ineq == '>':
+        return room_member_count > rhs
+    elif ineq == '>=':
+        return room_member_count >= rhs
+    elif ineq == '<=':
+        return room_member_count <= rhs
+    else:
+        return False
+
+
 class PushRuleEvaluator:
     DEFAULT_ACTIONS = []
-    INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$")
 
     def __init__(self, user_id, profile_tag, raw_rules, enabled_map, room_id,
                  our_member_event, store):
@@ -98,6 +128,8 @@ class PushRuleEvaluator:
         room_members = yield self.store.get_users_in_room(room_id)
         room_member_count = len(room_members)
 
+        evaluator = PushRuleEvaluatorForEvent.create(ev, room_member_count)
+
         for r in self.rules:
             if r['rule_id'] in self.enabled_map:
                 r['enabled'] = self.enabled_map[r['rule_id']]
@@ -105,21 +137,10 @@ class PushRuleEvaluator:
                 r['enabled'] = True
             if not r['enabled']:
                 continue
-            matches = True
 
             conditions = r['conditions']
             actions = r['actions']
 
-            for c in conditions:
-                matches &= self._event_fulfills_condition(
-                    ev, c, display_name=my_display_name,
-                    room_member_count=room_member_count,
-                    profile_tag=self.profile_tag
-                )
-            logger.debug(
-                "Rule %s %s",
-                r['rule_id'], "matches" if matches else "doesn't match"
-            )
 
             # ignore rules with no actions (we have an explicit 'dont_notify')
             if len(actions) == 0:
                 logger.warn(
@@ -127,6 +148,18 
@@ class PushRuleEvaluator: r['rule_id'], self.user_id ) continue + + matches = True + for c in conditions: + matches = evaluator.matches(c, my_display_name, self.profile_tag) + if not matches: + break + + logger.debug( + "Rule %s %s", + r['rule_id'], "matches" if matches else "doesn't match" + ) + if matches: logger.info( "%s matches for user %s, event %s", @@ -145,81 +178,84 @@ class PushRuleEvaluator: ) defer.returnValue(PushRuleEvaluator.DEFAULT_ACTIONS) - @staticmethod - def _glob_to_regexp(glob): - r = re.escape(glob) - r = re.sub(r'\\\*', r'.*?', r) - r = re.sub(r'\\\?', r'.', r) - # handle [abc], [a-z] and [!a-z] style ranges. - r = re.sub(r'\\\[(\\\!|)(.*)\\\]', - lambda x: ('[%s%s]' % (x.group(1) and '^' or '', - re.sub(r'\\\-', '-', x.group(2)))), r) - return r +class PushRuleEvaluatorForEvent(object): + WORD_BOUNDARY = re.compile(r'\b') + + def __init__(self, event, body_parts, room_member_count): + self._event = event + self._body_parts = body_parts + self._room_member_count = room_member_count + + self._value_cache = _flatten_dict(event) @staticmethod - def _event_fulfills_condition(ev, condition, - display_name, room_member_count, profile_tag): - if condition['kind'] == 'event_match': - if 'pattern' not in condition: - logger.warn("event_match condition with no pattern") - return False - # XXX: optimisation: cache our pattern regexps - if condition['key'] == 'content.body': - r = r'\b%s\b' % PushRuleEvaluator._glob_to_regexp(condition['pattern']) - else: - r = r'^%s$' % PushRuleEvaluator._glob_to_regexp(condition['pattern']) - val = _value_for_dotted_key(condition['key'], ev) - if val is None: - return False - return re.search(r, val, flags=re.IGNORECASE) is not None + def create(event, room_member_count): + body = event.get("content", {}).get("body", None) + if body: + body_parts = PushRuleEvaluatorForEvent.WORD_BOUNDARY.split(body) + body_parts[:] = [ + part.lower() for part in body_parts + ] + else: + body_parts = [] + + return PushRuleEvaluatorForEvent(event, body_parts, room_member_count) + def matches(self, condition, display_name, profile_tag): + if condition['kind'] == 'event_match': + return self._event_match(condition) elif condition['kind'] == 'device': if 'profile_tag' not in condition: return True return condition['profile_tag'] == profile_tag - elif condition['kind'] == 'contains_display_name': - # This is special because display names can be different - # between rooms and so you can't really hard code it in a rule. - # Optimisation: we should cache these names and update them from - # the event stream. 
-            if 'content' not in ev or 'body' not in ev['content']:
-                return False
-            if not display_name:
-                return False
-            return re.search(
-                r"\b%s\b" % re.escape(display_name), ev['content']['body'],
-                flags=re.IGNORECASE
-            ) is not None
-
+            return self._contains_display_name(display_name)
         elif condition['kind'] == 'room_member_count':
-            if 'is' not in condition:
-                return False
-            m = PushRuleEvaluator.INEQUALITY_EXPR.match(condition['is'])
-            if not m:
-                return False
-            ineq = m.group(1)
-            rhs = m.group(2)
-            if not rhs.isdigit():
-                return False
-            rhs = int(rhs)
-
-            if ineq == '' or ineq == '==':
-                return room_member_count == rhs
-            elif ineq == '<':
-                return room_member_count < rhs
-            elif ineq == '>':
-                return room_member_count > rhs
-            elif ineq == '>=':
-                return room_member_count >= rhs
-            elif ineq == '<=':
-                return room_member_count <= rhs
-            else:
-                return False
+            return _room_member_count(
+                self._event, condition, self._room_member_count
+            )
         else:
             return True
 
+    def _event_match(self, condition):
+        pattern = condition.get('pattern', None)
+
+        if not pattern:
+            logger.warn("event_match condition with no pattern")
+            return False
+
+        # XXX: optimisation: cache our pattern regexps
+        if condition['key'] == 'content.body':
+            matcher = _glob_to_matcher(pattern)
+
+            for part in self._body_parts:
+                if matcher(part):
+                    return True
+            return False
+        else:
+            haystack = self._get_value(condition['key'])
+            if haystack is None:
+                return False
+
+            matcher = _glob_to_matcher(pattern)
+
+            return matcher(haystack.lower())
+
+    def _contains_display_name(self, display_name):
+        if not display_name:
+            return False
+
+        lower_display_name = display_name.lower()
+        for part in self._body_parts:
+            if part == lower_display_name:
+                return True
+
+        return False
+
+    def _get_value(self, dotted_key):
+        return self._value_cache.get(dotted_key, None)
+
 
 def _value_for_dotted_key(dotted_key, event):
     parts = dotted_key.split(".")
@@ -229,4 +265,42 @@ def _value_for_dotted_key(dotted_key, event):
             return None
         val = val[parts[0]]
         parts = parts[1:]
+
     return val
+
+
+def _glob_to_matcher(glob):
+    glob = glob.lower()
+
+    if not IS_GLOB.search(glob):
+        return lambda value: value == glob
+
+    r = re.escape(glob)
+
+    r = r.replace(r'\*', '.*?')
+    r = r.replace(r'\?', '.')
+
+    # handle [abc], [a-z] and [!a-z] style ranges.
+    r = GLOB_REGEX.sub(
+        lambda x: (
+            '[%s%s]' % (
+                x.group(1) and '^' or '',
+                # unescape the '\-' that re.escape produced, so ranges
+                # like [a-z] survive as real ranges
+                x.group(2).replace(r'\-', '-')
+            )
+        ),
+        r,
+    )
+
+    r = r + "$"
+    r = re.compile(r)
+    return lambda value: r.match(value)
+
+
+def _flatten_dict(d, prefix=[], result=None):
+    # a shared mutable default for `result` would leak keys between events,
+    # so build a fresh dict per top-level call
+    if result is None:
+        result = {}
+    for key, value in d.items():
+        if isinstance(value, basestring):
+            result[".".join(prefix + [key])] = value.lower()
+        elif hasattr(value, "items"):
+            _flatten_dict(value, prefix=(prefix+[key]), result=result)
+
+    return result
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index 2adfefd994..1adf28b893 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -14,7 +14,7 @@
 # limitations under the License.
from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cachedInlineCallbacks +from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList from twisted.internet import defer import logging @@ -60,6 +60,27 @@ class PushRuleStore(SQLBaseStore): r['rule_id']: False if r['enabled'] == 0 else True for r in results }) + @cached() + def _get_push_rules_enabled_for_user(self, user_id): + def f(txn): + sql = ( + "SELECT pr.*" + " FROM push_rules AS pr" + " LEFT JOIN push_rules_enable AS pre" + " ON pr.user_name = pre.user_name AND pr.rule_id = pre.rule_id" + " WHERE pr.user_name = ?" + " AND (pre.enabled IS NULL OR pre.enabled = 1)" + " ORDER BY pr.priority_class DESC, pr.priority DESC" + ) + txn.execute(sql, (user_id,)) + return self.cursor_to_dict(txn) + + return self.runInteraction( + "_get_push_rules_enabled_for_user", f + ) + + # @cachedList(cache=_get_push_rules_enabled_for_user.cache, list_name="user_ids", + # num_args=1, inlineCallbacks=True) @defer.inlineCallbacks def bulk_get_push_rules(self, user_ids): if not user_ids: diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 999b710fbb..70cde0d04d 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.errors import StoreError, Codes from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cached, cachedInlineCallbacks +from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList class RegistrationStore(SQLBaseStore): @@ -256,10 +256,10 @@ class RegistrationStore(SQLBaseStore): defer.returnValue(res if res else False) @cachedInlineCallbacks() - def is_guest(self, user): + def is_guest(self, user_id): res = yield self._simple_select_one_onecol( table="users", - keyvalues={"name": user.to_string()}, + keyvalues={"name": user_id}, retcol="is_guest", allow_none=True, desc="is_guest", @@ -267,6 +267,26 @@ class RegistrationStore(SQLBaseStore): defer.returnValue(res if res else False) + @cachedList(cache=is_guest.cache, list_name="user_ids", num_args=1, + inlineCallbacks=True) + def are_guests(self, user_ids): + sql = "SELECT name, is_guest FROM users WHERE name IN (%s)" % ( + ",".join("?" for _ in user_ids), + ) + + rows = yield self._execute( + "are_guests", self.cursor_to_dict, sql, *user_ids + ) + + result = {user_id: False for user_id in user_ids} + + result.update({ + row["name"]: bool(row["is_guest"]) + for row in rows + }) + + defer.returnValue(result) + def _query_for_auth(self, txn, token): sql = ( "SELECT users.name, users.is_guest, access_tokens.id as token_id" -- cgit 1.4.1 From 2c176e02ae910ce52197539b31f78ae1b1ef4c3c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 18 Jan 2016 14:24:31 +0000 Subject: Make unit tests work --- synapse/storage/push_rule.py | 2 +- tests/handlers/test_federation.py | 141 ------------- tests/handlers/test_room.py | 418 -------------------------------------- 3 files changed, 1 insertion(+), 560 deletions(-) delete mode 100644 tests/handlers/test_federation.py delete mode 100644 tests/handlers/test_room.py (limited to 'synapse/storage') diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 1adf28b893..f210e6c14d 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -14,7 +14,7 @@ # limitations under the License. 
from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList +from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from twisted.internet import defer import logging diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py deleted file mode 100644 index 11a3d94bb0..0000000000 --- a/tests/handlers/test_federation.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from twisted.internet import defer -from tests import unittest - -from synapse.api.constants import EventTypes -from synapse.events import FrozenEvent -from synapse.handlers.federation import FederationHandler - -from mock import NonCallableMock, ANY, Mock - -from ..utils import setup_test_homeserver - - -class FederationTestCase(unittest.TestCase): - - @defer.inlineCallbacks - def setUp(self): - - self.state_handler = NonCallableMock(spec_set=[ - "compute_event_context", - ]) - - self.auth = NonCallableMock(spec_set=[ - "check", - "check_host_in_room", - ]) - - self.hostname = "test" - hs = yield setup_test_homeserver( - self.hostname, - datastore=NonCallableMock(spec_set=[ - "persist_event", - "store_room", - "get_room", - "get_destination_retry_timings", - "set_destination_retry_timings", - "have_events", - "get_users_in_room", - "bulk_get_push_rules", - "get_current_state", - "set_push_actions_for_event_and_users", - "is_guest", - "get_state_for_events", - ]), - resource_for_federation=NonCallableMock(), - http_client=NonCallableMock(spec_set=[]), - notifier=NonCallableMock(spec_set=["on_new_room_event"]), - handlers=NonCallableMock(spec_set=[ - "room_member_handler", - "federation_handler", - ]), - auth=self.auth, - state_handler=self.state_handler, - keyring=Mock(), - ) - - self.datastore = hs.get_datastore() - self.handlers = hs.get_handlers() - self.notifier = hs.get_notifier() - self.hs = hs - - self.handlers.federation_handler = FederationHandler(self.hs) - - self.datastore.get_state_for_events.return_value = {"$a:b": {}} - - @defer.inlineCallbacks - def test_msg(self): - pdu = FrozenEvent({ - "type": EventTypes.Message, - "room_id": "foo", - "content": {"msgtype": u"fooo"}, - "origin_server_ts": 0, - "event_id": "$a:b", - "user_id":"@a:b", - "origin": "b", - "auth_events": [], - "hashes": {"sha256":"AcLrgtUIqqwaGoHhrEvYG1YLDIsVPYJdSRGhkp3jJp8"}, - }) - - self.datastore.persist_event.return_value = defer.succeed((1,1)) - self.datastore.get_room.return_value = defer.succeed(True) - self.datastore.get_users_in_room.return_value = ["@a:b"] - self.datastore.bulk_get_push_rules.return_value = {} - self.datastore.get_current_state.return_value = {} - self.auth.check_host_in_room.return_value = defer.succeed(True) - - retry_timings_res = { - "destination": "", - "retry_last_ts": 0, - "retry_interval": 0, - } - self.datastore.get_destination_retry_timings.return_value = ( - defer.succeed(retry_timings_res) - ) - - def have_events(event_ids): - return 
defer.succeed({}) - self.datastore.have_events.side_effect = have_events - - def annotate(ev, old_state=None, outlier=False): - context = Mock() - context.current_state = {} - context.auth_events = {} - return defer.succeed(context) - self.state_handler.compute_event_context.side_effect = annotate - - yield self.handlers.federation_handler.on_receive_pdu( - "fo", pdu, False - ) - - self.datastore.persist_event.assert_called_once_with( - ANY, - is_new_state=True, - backfilled=False, - current_state=None, - context=ANY, - ) - - self.state_handler.compute_event_context.assert_called_once_with( - ANY, old_state=None, outlier=False - ) - - self.auth.check.assert_called_once_with(ANY, auth_events={}) - - self.notifier.on_new_room_event.assert_called_once_with( - ANY, 1, 1, extra_users=[] - ) diff --git a/tests/handlers/test_room.py b/tests/handlers/test_room.py deleted file mode 100644 index e7a12a2ba2..0000000000 --- a/tests/handlers/test_room.py +++ /dev/null @@ -1,418 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from twisted.internet import defer -from .. import unittest - -from synapse.api.constants import EventTypes, Membership -from synapse.handlers.room import RoomMemberHandler, RoomCreationHandler -from synapse.handlers.profile import ProfileHandler -from synapse.types import UserID -from ..utils import setup_test_homeserver - -from mock import Mock, NonCallableMock - - -class RoomMemberHandlerTestCase(unittest.TestCase): - - @defer.inlineCallbacks - def setUp(self): - self.hostname = "red" - hs = yield setup_test_homeserver( - self.hostname, - ratelimiter=NonCallableMock(spec_set=[ - "send_message", - ]), - datastore=NonCallableMock(spec_set=[ - "persist_event", - "get_room_member", - "get_room", - "store_room", - "get_latest_events_in_room", - "add_event_hashes", - "get_users_in_room", - "bulk_get_push_rules", - "get_current_state", - "set_push_actions_for_event_and_users", - "get_state_for_events", - "is_guest", - ]), - resource_for_federation=NonCallableMock(), - http_client=NonCallableMock(spec_set=[]), - notifier=NonCallableMock(spec_set=["on_new_room_event"]), - handlers=NonCallableMock(spec_set=[ - "room_member_handler", - "profile_handler", - "federation_handler", - ]), - auth=NonCallableMock(spec_set=[ - "check", - "add_auth_events", - "check_host_in_room", - ]), - state_handler=NonCallableMock(spec_set=[ - "compute_event_context", - "get_current_state", - ]), - ) - - self.federation = NonCallableMock(spec_set=[ - "handle_new_event", - "send_invite", - "get_state_for_room", - ]) - - self.datastore = hs.get_datastore() - self.handlers = hs.get_handlers() - self.notifier = hs.get_notifier() - self.state_handler = hs.get_state_handler() - self.distributor = hs.get_distributor() - self.auth = hs.get_auth() - self.hs = hs - - self.handlers.federation_handler = self.federation - - self.distributor.declare("collect_presencelike_data") - - self.handlers.room_member_handler = RoomMemberHandler(self.hs) - 
self.handlers.profile_handler = ProfileHandler(self.hs) - self.room_member_handler = self.handlers.room_member_handler - - self.ratelimiter = hs.get_ratelimiter() - self.ratelimiter.send_message.return_value = (True, 0) - - self.datastore.persist_event.return_value = (1,1) - self.datastore.add_event_hashes.return_value = [] - self.datastore.get_users_in_room.return_value = ["@bob:red"] - self.datastore.bulk_get_push_rules.return_value = {} - - @defer.inlineCallbacks - def test_invite(self): - room_id = "!foo:red" - user_id = "@bob:red" - target_user_id = "@red:blue" - content = {"membership": Membership.INVITE} - - builder = self.hs.get_event_builder_factory().new({ - "type": EventTypes.Member, - "sender": user_id, - "state_key": target_user_id, - "room_id": room_id, - "content": content, - }) - - self.datastore.get_latest_events_in_room.return_value = ( - defer.succeed([]) - ) - self.datastore.get_current_state.return_value = {} - self.datastore.get_state_for_events = lambda event_ids,types: {x: {} for x in event_ids} - - def annotate(_): - ctx = Mock() - ctx.current_state = { - (EventTypes.Member, "@alice:green"): self._create_member( - user_id="@alice:green", - room_id=room_id, - ), - (EventTypes.Member, "@bob:red"): self._create_member( - user_id="@bob:red", - room_id=room_id, - ), - } - ctx.prev_state_events = [] - - return defer.succeed(ctx) - - self.state_handler.compute_event_context.side_effect = annotate - - def add_auth(_, ctx): - ctx.auth_events = ctx.current_state[ - (EventTypes.Member, "@bob:red") - ] - - return defer.succeed(True) - self.auth.add_auth_events.side_effect = add_auth - - def send_invite(domain, event): - return defer.succeed(event) - - self.federation.send_invite.side_effect = send_invite - - room_handler = self.room_member_handler - event, context = yield room_handler._create_new_client_event( - builder - ) - - yield room_handler.send_membership_event(event, context) - - self.state_handler.compute_event_context.assert_called_once_with( - builder - ) - - self.auth.add_auth_events.assert_called_once_with( - builder, context - ) - - self.federation.send_invite.assert_called_once_with( - "blue", event, - ) - - self.datastore.persist_event.assert_called_once_with( - event, context=context, - ) - self.notifier.on_new_room_event.assert_called_once_with( - event, 1, 1, extra_users=[UserID.from_string(target_user_id)] - ) - self.assertFalse(self.datastore.get_room.called) - self.assertFalse(self.datastore.store_room.called) - self.assertFalse(self.federation.get_state_for_room.called) - - @defer.inlineCallbacks - def test_simple_join(self): - room_id = "!foo:red" - user_id = "@bob:red" - user = UserID.from_string(user_id) - - join_signal_observer = Mock() - self.distributor.observe("user_joined_room", join_signal_observer) - - builder = self.hs.get_event_builder_factory().new({ - "type": EventTypes.Member, - "sender": user_id, - "state_key": user_id, - "room_id": room_id, - "content": {"membership": Membership.JOIN}, - }) - - self.datastore.get_latest_events_in_room.return_value = ( - defer.succeed([]) - ) - self.datastore.get_current_state.return_value = {} - self.datastore.get_state_for_events = lambda event_ids,types: {x: {} for x in event_ids} - - def annotate(_): - ctx = Mock() - ctx.current_state = { - (EventTypes.Member, "@bob:red"): self._create_member( - user_id="@bob:red", - room_id=room_id, - membership=Membership.INVITE - ), - } - ctx.prev_state_events = [] - - return defer.succeed(ctx) - - self.state_handler.compute_event_context.side_effect = annotate - - 
def add_auth(_, ctx): - ctx.auth_events = ctx.current_state[ - (EventTypes.Member, "@bob:red") - ] - - return defer.succeed(True) - self.auth.add_auth_events.side_effect = add_auth - - room_handler = self.room_member_handler - event, context = yield room_handler._create_new_client_event( - builder - ) - - # Actual invocation - yield room_handler.send_membership_event(event, context) - - self.federation.handle_new_event.assert_called_once_with( - event, destinations=set() - ) - - self.datastore.persist_event.assert_called_once_with( - event, context=context - ) - self.notifier.on_new_room_event.assert_called_once_with( - event, 1, 1, extra_users=[user] - ) - - join_signal_observer.assert_called_with( - user=user, room_id=room_id - ) - - def _create_member(self, user_id, room_id, membership=Membership.JOIN): - builder = self.hs.get_event_builder_factory().new({ - "type": EventTypes.Member, - "sender": user_id, - "state_key": user_id, - "room_id": room_id, - "content": {"membership": membership}, - }) - - return builder.build() - - @defer.inlineCallbacks - def test_simple_leave(self): - room_id = "!foo:red" - user_id = "@bob:red" - user = UserID.from_string(user_id) - - builder = self.hs.get_event_builder_factory().new({ - "type": EventTypes.Member, - "sender": user_id, - "state_key": user_id, - "room_id": room_id, - "content": {"membership": Membership.LEAVE}, - }) - - self.datastore.get_latest_events_in_room.return_value = ( - defer.succeed([]) - ) - self.datastore.get_current_state.return_value = {} - self.datastore.get_state_for_events = lambda event_ids,types: {x: {} for x in event_ids} - - def annotate(_): - ctx = Mock() - ctx.current_state = { - (EventTypes.Member, "@bob:red"): self._create_member( - user_id="@bob:red", - room_id=room_id, - membership=Membership.JOIN - ), - } - ctx.prev_state_events = [] - - return defer.succeed(ctx) - - self.state_handler.compute_event_context.side_effect = annotate - - def add_auth(_, ctx): - ctx.auth_events = ctx.current_state[ - (EventTypes.Member, "@bob:red") - ] - - return defer.succeed(True) - self.auth.add_auth_events.side_effect = add_auth - - room_handler = self.room_member_handler - event, context = yield room_handler._create_new_client_event( - builder - ) - - leave_signal_observer = Mock() - self.distributor.observe("user_left_room", leave_signal_observer) - - # Actual invocation - yield room_handler.send_membership_event(event, context) - - self.federation.handle_new_event.assert_called_once_with( - event, destinations=set(['red']) - ) - - self.datastore.persist_event.assert_called_once_with( - event, context=context - ) - self.notifier.on_new_room_event.assert_called_once_with( - event, 1, 1, extra_users=[user] - ) - - leave_signal_observer.assert_called_with( - user=user, room_id=room_id - ) - - -class RoomCreationTest(unittest.TestCase): - - @defer.inlineCallbacks - def setUp(self): - self.hostname = "red" - - hs = yield setup_test_homeserver( - self.hostname, - datastore=NonCallableMock(spec_set=[ - "store_room", - "snapshot_room", - "persist_event", - "get_joined_hosts_for_room", - ]), - http_client=NonCallableMock(spec_set=[]), - notifier=NonCallableMock(spec_set=["on_new_room_event"]), - handlers=NonCallableMock(spec_set=[ - "room_creation_handler", - "message_handler", - ]), - auth=NonCallableMock(spec_set=["check", "add_auth_events"]), - ratelimiter=NonCallableMock(spec_set=[ - "send_message", - ]), - ) - - self.federation = NonCallableMock(spec_set=[ - "handle_new_event", - ]) - - self.handlers = hs.get_handlers() - - 
self.handlers.room_creation_handler = RoomCreationHandler(hs) - self.room_creation_handler = self.handlers.room_creation_handler - - self.message_handler = self.handlers.message_handler - - self.ratelimiter = hs.get_ratelimiter() - self.ratelimiter.send_message.return_value = (True, 0) - - @defer.inlineCallbacks - def test_room_creation(self): - user_id = "@foo:red" - room_id = "!bobs_room:red" - config = {"visibility": "private"} - - yield self.room_creation_handler.create_room( - user_id=user_id, - room_id=room_id, - config=config, - ) - - self.assertTrue(self.message_handler.create_and_send_event.called) - - event_dicts = [ - e[0][0] - for e in self.message_handler.create_and_send_event.call_args_list - ] - - self.assertTrue(len(event_dicts) > 3) - - self.assertDictContainsSubset( - { - "type": EventTypes.Create, - "sender": user_id, - "room_id": room_id, - }, - event_dicts[0] - ) - - self.assertEqual(user_id, event_dicts[0]["content"]["creator"]) - - self.assertDictContainsSubset( - { - "type": EventTypes.Member, - "sender": user_id, - "room_id": room_id, - "state_key": user_id, - }, - event_dicts[1] - ) - - self.assertEqual( - Membership.JOIN, - event_dicts[1]["content"]["membership"] - ) -- cgit 1.4.1 From 5cd2126a6a6e23ec1f694cfba7be7bbf29bd1506 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 18 Jan 2016 16:48:37 +0000 Subject: Remove dead code --- synapse/storage/push_rule.py | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index f210e6c14d..2adfefd994 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -14,7 +14,7 @@ # limitations under the License. from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cached, cachedInlineCallbacks +from synapse.util.caches.descriptors import cachedInlineCallbacks from twisted.internet import defer import logging @@ -60,27 +60,6 @@ class PushRuleStore(SQLBaseStore): r['rule_id']: False if r['enabled'] == 0 else True for r in results }) - @cached() - def _get_push_rules_enabled_for_user(self, user_id): - def f(txn): - sql = ( - "SELECT pr.*" - " FROM push_rules AS pr" - " LEFT JOIN push_rules_enable AS pre" - " ON pr.user_name = pre.user_name AND pr.rule_id = pre.rule_id" - " WHERE pr.user_name = ?" 
- " AND (pre.enabled IS NULL OR pre.enabled = 1)" - " ORDER BY pr.priority_class DESC, pr.priority DESC" - ) - txn.execute(sql, (user_id,)) - return self.cursor_to_dict(txn) - - return self.runInteraction( - "_get_push_rules_enabled_for_user", f - ) - - # @cachedList(cache=_get_push_rules_enabled_for_user.cache, list_name="user_ids", - # num_args=1, inlineCallbacks=True) @defer.inlineCallbacks def bulk_get_push_rules(self, user_ids): if not user_ids: -- cgit 1.4.1 From 3adcc4c86aade29f502b7245acc2353326a62256 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 19 Jan 2016 11:35:50 +0000 Subject: Return highlight_count in /sync --- synapse/handlers/sync.py | 42 ++++++++++++++++++++++++++++++----- synapse/rest/client/v2_alpha/sync.py | 1 + synapse/storage/event_push_actions.py | 5 +++-- 3 files changed, 40 insertions(+), 8 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 52202d8e63..66e57bd4d6 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -55,6 +55,7 @@ class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [ "ephemeral", "account_data", "unread_notification_count", + "unread_highlight_count", ])): __slots__ = [] @@ -292,9 +293,14 @@ class SyncHandler(BaseHandler): notifs = yield self.unread_notifs_for_room_id( room_id, sync_config, ephemeral_by_room ) + notif_count = None + highlight_count = None if notifs is not None: notif_count = len(notifs) + highlight_count = len([ + 1 for notif in notifs if _action_has_highlight(notif["actions"]) + ]) current_state = yield self.get_state_at(room_id, now_token) @@ -307,6 +313,7 @@ class SyncHandler(BaseHandler): room_id, tags_by_room, account_data_by_room ), unread_notification_count=notif_count, + unread_highlight_count=highlight_count, )) def account_data_for_user(self, account_data): @@ -529,9 +536,14 @@ class SyncHandler(BaseHandler): notifs = yield self.unread_notifs_for_room_id( room_id, sync_config, all_ephemeral_by_room ) + notif_count = None + highlight_count = None if notifs is not None: notif_count = len(notifs) + highlight_count = len([ + 1 for notif in notifs if _action_has_highlight(notif["actions"]) + ]) just_joined = yield self.check_joined_room(sync_config, state) if just_joined: @@ -553,7 +565,8 @@ class SyncHandler(BaseHandler): account_data=self.account_data_for_room( room_id, tags_by_room, account_data_by_room ), - unread_notification_count=notif_count + unread_notification_count=notif_count, + unread_highlight_count=highlight_count, ) logger.debug("Result for room %s: %r", room_id, room_sync) @@ -692,9 +705,14 @@ class SyncHandler(BaseHandler): notifs = yield self.unread_notifs_for_room_id( room_id, sync_config, ephemeral_by_room ) + notif_count = None + highlight_count = None if notifs is not None: notif_count = len(notifs) + highlight_count = len([ + 1 for notif in notifs if _action_has_highlight(notif["actions"]) + ]) room_sync = JoinedSyncResult( room_id=room_id, @@ -705,6 +723,7 @@ class SyncHandler(BaseHandler): room_id, tags_by_room, account_data_by_room ), unread_notification_count=notif_count, + unread_highlight_count=highlight_count, ) logger.debug("Room sync: %r", room_sync) @@ -850,8 +869,19 @@ class SyncHandler(BaseHandler): notifs = yield self.store.get_unread_event_push_actions_by_room_for_user( room_id, sync_config.user.to_string(), last_unread_event_id ) - else: - # There is no new information in this period, so your notification - # count is whatever it was last time. 
- defer.returnValue(None) - defer.returnValue(notifs) + defer.returnValue(notifs) + + # There is no new information in this period, so your notification + # count is whatever it was last time. + defer.returnValue(None) + + +def _action_has_highlight(actions): + for action in actions: + try: + if action.get("set_tweak", None) == "highlight": + return action.get("value", True) + except AttributeError: + pass + + return False diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 826f9db189..e300ced214 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -313,6 +313,7 @@ class SyncRestServlet(RestServlet): ephemeral_events = filter.filter_room_ephemeral(room.ephemeral) result["ephemeral"] = {"events": ephemeral_events} result["unread_notification_count"] = room.unread_notification_count + result["unread_highlight_count"] = room.unread_highlight_count return result diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index d99171ee87..6b7cebc9ce 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -17,7 +17,7 @@ from ._base import SQLBaseStore from twisted.internet import defer import logging -import simplejson as json +import ujson as json logger = logging.getLogger(__name__) @@ -84,7 +84,8 @@ class EventPushActionsStore(SQLBaseStore): ) ) return [ - {"event_id": row[0], "actions": row[1]} for row in txn.fetchall() + {"event_id": row[0], "actions": json.loads(row[1])} + for row in txn.fetchall() ] ret = yield self.runInteraction( -- cgit 1.4.1 From 5a7d1ecffcab7a94caf70471a2eec56eb868573c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 19 Jan 2016 16:01:05 +0000 Subject: Add regex cache. 
Only calculate push actions for users that have sent read receipts, and are on that server
---
 synapse/handlers/_base.py                | 2 +-
 synapse/handlers/federation.py           | 2 +-
 synapse/push/action_generator.py         | 7 ++++---
 synapse/push/bulk_push_rule_evaluator.py | 15 ++++++++++-----
 synapse/push/push_rule_evaluator.py      | 20 +++++++++++++++++---
 synapse/server.py                        | 4 ++++
 synapse/storage/receipts.py              | 14 +++++++++++++-
 7 files changed, 50 insertions(+), 14 deletions(-)

(limited to 'synapse/storage')

diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index 2d1167296a..5c7617de44 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -266,7 +266,7 @@ class BaseHandler(object):
             event, context=context
         )

-        action_generator = ActionGenerator(self.store)
+        action_generator = ActionGenerator(self.hs)
         yield action_generator.handle_push_actions_for_event(
             event, self
         )
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 4b94940e99..6c19d6ae8c 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -245,7 +245,7 @@ class FederationHandler(BaseHandler):
             yield user_joined_room(self.distributor, user, event.room_id)

         if not backfilled and not event.internal_metadata.is_outlier():
-            action_generator = ActionGenerator(self.store)
+            action_generator = ActionGenerator(self.hs)
             yield action_generator.handle_push_actions_for_event(
                 event, self
             )
diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py
index 4cf94f6c61..1d2e558f9a 100644
--- a/synapse/push/action_generator.py
+++ b/synapse/push/action_generator.py
@@ -25,8 +25,9 @@ logger = logging.getLogger(__name__)


 class ActionGenerator:
-    def __init__(self, store):
-        self.store = store
+    def __init__(self, hs):
+        self.hs = hs
+        self.store = hs.get_datastore()
         # really we want to get all user ids and all profile tags too,
         # since we want the actions for each profile tag for every user and
         # also actions for a client with no profile tag for each user.
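# A sketch, not part of the patch: the commit above narrows push-rule
# evaluation to users who have sent a read receipt into the room and who
# belong to this homeserver. The helper below is illustrative only; it
# assumes receipt rows shaped like those returned by get_receipts_for_room
# and mirrors the is_mine_id() suffix check introduced in this commit.

def local_receipt_users(receipt_rows, hostname):
    # Keep only read-receipt senders whose user IDs end in ":<hostname>".
    suffix = ":" + hostname
    return [
        row["user_id"] for row in receipt_rows
        if row["user_id"].endswith(suffix)
    ]

rows = [{"user_id": "@alice:here"}, {"user_id": "@bob:elsewhere"}]
assert local_receipt_users(rows, "here") == ["@alice:here"]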
@@ -42,7 +43,7 @@ class ActionGenerator: ) bulk_evaluator = yield bulk_push_rule_evaluator.evaluator_for_room_id( - event.room_id, self.store + event.room_id, self.hs, self.store ) actions_by_user = yield bulk_evaluator.action_for_event_by_user(event, handler) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index efd686fa6e..1000ae6301 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -36,6 +36,7 @@ def decode_rule_json(rule): @defer.inlineCallbacks def _get_rules(room_id, user_ids, store): rules_by_user = yield store.bulk_get_push_rules(user_ids) + rules_by_user = { uid: baserules.list_with_base_rules([ decode_rule_json(rule_list) @@ -47,12 +48,16 @@ def _get_rules(room_id, user_ids, store): @defer.inlineCallbacks -def evaluator_for_room_id(room_id, store): - users = yield store.get_users_in_room(room_id) - rules_by_user = yield _get_rules(room_id, users, store) +def evaluator_for_room_id(room_id, hs, store): + results = yield store.get_receipts_for_room(room_id, "m.read") + user_ids = [ + row["user_id"] for row in results + if hs.is_mine_id(row["user_id"]) + ] + rules_by_user = yield _get_rules(room_id, user_ids, store) defer.returnValue(BulkPushRuleEvaluator( - room_id, rules_by_user, users, store + room_id, rules_by_user, user_ids, store )) @@ -129,7 +134,7 @@ def _condition_checker(evaluator, conditions, uid, display_name, cache): res = evaluator.matches(cond, uid, display_name, None) if _id: - cache[_id] = res + cache[_id] = bool(res) if not res: return False diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 4654994d2d..753b6469e2 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -22,6 +22,7 @@ import simplejson as json import re from synapse.types import UserID +from synapse.util.caches.lrucache import LruCache logger = logging.getLogger(__name__) @@ -277,18 +278,18 @@ def _glob_matches(glob, value, word_boundary=False): ) if word_boundary: r = r"\b%s\b" % (r,) - r = re.compile(r, flags=re.IGNORECASE) + r = _compile_regex(r) return r.search(value) else: r = r + "$" - r = re.compile(r, flags=re.IGNORECASE) + r = _compile_regex(r) return r.match(value) elif word_boundary: r = re.escape(glob) r = r"\b%s\b" % (r,) - r = re.compile(r, flags=re.IGNORECASE) + r = _compile_regex(r) return r.search(value) else: @@ -306,3 +307,16 @@ def _flatten_dict(d, prefix=[], result={}): _flatten_dict(value, prefix=(prefix+[key]), result=result) return result + + +regex_cache = LruCache(100000) + + +def _compile_regex(regex_str): + r = regex_cache.get(regex_str, None) + if r: + return r + + r = re.compile(regex_str, flags=re.IGNORECASE) + regex_cache[regex_str] = r + return r diff --git a/synapse/server.py b/synapse/server.py index ffd4f936d0..63f9059837 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -96,6 +96,7 @@ class BaseHomeServer(object): hostname : The hostname for the server. 
""" self.hostname = hostname + self.hostname_with_colon = ":" + hostname self._building = {} # Other kwargs are explicit dependencies @@ -139,6 +140,9 @@ class BaseHomeServer(object): def is_mine(self, domain_specific_string): return domain_specific_string.domain == self.hostname + def is_mine_id(self, string): + return string.endswith(self.hostname_with_colon) + # Build magic accessors for every dependency for depname in BaseHomeServer.DEPENDENCIES: BaseHomeServer._make_dependency_method(depname) diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 21cf88b3da..c80e576620 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -14,7 +14,7 @@ # limitations under the License. from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList +from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList, cached from synapse.util.caches import cache_counter, caches_by_name from twisted.internet import defer @@ -33,6 +33,18 @@ class ReceiptsStore(SQLBaseStore): self._receipts_stream_cache = _RoomStreamChangeCache() + @cached(num_args=2) + def get_receipts_for_room(self, room_id, receipt_type): + return self._simple_select_list( + table="receipts_linearized", + keyvalues={ + "room_id": room_id, + "receipt_type": receipt_type, + }, + retcols=("user_id", "event_id"), + desc="get_receipts_for_room", + ) + @defer.inlineCallbacks def get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None): """Get receipts for multiple rooms for sending to clients. -- cgit 1.4.1 From 3fa344c0376045d1be396701978ba46c552065b6 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 20 Jan 2016 15:30:31 +0000 Subject: Add storage function to get all receipts for a user. Also add some cache invalidation to the receipts storage because there wasn't any, and remove a method that was unused. --- synapse/push/__init__.py | 28 +++++------------------ synapse/storage/receipts.py | 55 ++++++++++++++++++++++++++++----------------- 2 files changed, 40 insertions(+), 43 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index ffae02a285..9a4af2b3ca 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -16,9 +16,8 @@ from twisted.internet import defer from synapse.streams.config import PaginationConfig -from synapse.types import StreamToken, UserID +from synapse.types import StreamToken from synapse.api.constants import Membership -from synapse.api.filtering import FilterCollection import synapse.util.async import push_rule_evaluator as push_rule_evaluator @@ -290,22 +289,9 @@ class Pusher(object): membership_list=(Membership.INVITE, Membership.JOIN) ) - user_is_guest = yield self.store.is_guest(self.user_id) - - # XXX: importing inside method to break circular dependency. - # should sort out the mess by moving all this logic out of - # push/__init__.py and probably moving the logic we use from the sync - # handler to somewhere more amenable to re-use. 
- from synapse.handlers.sync import SyncConfig - sync_config = SyncConfig( - user=UserID.from_string(self.user_id), - filter=FilterCollection({}), - is_guest=user_is_guest, - ) - now_token = yield self.hs.get_event_sources().get_current_token() - sync_handler = self.hs.get_handlers().sync_handler - _, ephemeral_by_room = yield sync_handler.ephemeral_by_room( - sync_config, now_token + my_receipts_by_room = yield self.store.get_receipts_for_user( + self.user_id, + "m.read", ) badge = 0 @@ -314,11 +300,9 @@ class Pusher(object): if r.membership == Membership.INVITE: badge += 1 else: - last_unread_event_id = sync_handler.last_read_event_id_for_room_and_user( - r.room_id, self.user_id, ephemeral_by_room - ) + if r.room_id in my_receipts_by_room: + last_unread_event_id = my_receipts_by_room[r.room_id] - if last_unread_event_id: notifs = yield ( self.store.get_unread_event_push_actions_by_room_for_user( r.room_id, self.user_id, last_unread_event_id diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index c80e576620..018140f47a 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -45,6 +45,21 @@ class ReceiptsStore(SQLBaseStore): desc="get_receipts_for_room", ) + @cachedInlineCallbacks(num_args=2) + def get_receipts_for_user(self, user_id, receipt_type): + def f(txn): + sql = ( + "SELECT room_id,event_id " + "FROM receipts_linearized " + "WHERE user_id = ? AND receipt_type = ? " + ) + txn.execute(sql, (user_id, receipt_type)) + return txn.fetchall() + + defer.returnValue(dict( + (yield self.runInteraction("get_receipts_for_user", f)) + )) + @defer.inlineCallbacks def get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None): """Get receipts for multiple rooms for sending to clients. @@ -194,29 +209,16 @@ class ReceiptsStore(SQLBaseStore): def get_max_receipt_stream_id(self): return self._receipts_id_gen.get_max_token(self) - @cachedInlineCallbacks() - def get_graph_receipts_for_room(self, room_id): - """Get receipts for sending to remote servers. 
- """ - rows = yield self._simple_select_list( - table="receipts_graph", - keyvalues={"room_id": room_id}, - retcols=["receipt_type", "user_id", "event_id"], - desc="get_linearized_receipts_for_room", - ) - - result = {} - for row in rows: - result.setdefault( - row["user_id"], {} - ).setdefault( - row["receipt_type"], [] - ).append(row["event_id"]) - - defer.returnValue(result) - def insert_linearized_receipt_txn(self, txn, room_id, receipt_type, user_id, event_id, data, stream_id): + txn.call_after( + self.get_receipts_for_room.invalidate, (room_id, receipt_type) + ) + txn.call_after( + self.get_receipts_for_user.invalidate, (user_id, receipt_type) + ) + # FIXME: This shouldn't invalidate the whole cache + txn.call_after(self.get_linearized_receipts_for_room.invalidate_all) # We don't want to clobber receipts for more recent events, so we # have to compare orderings of existing receipts @@ -324,6 +326,7 @@ class ReceiptsStore(SQLBaseStore): ) max_persisted_id = yield self._stream_id_gen.get_max_token(self) + defer.returnValue((stream_id, max_persisted_id)) def insert_graph_receipt(self, room_id, receipt_type, user_id, event_ids, @@ -336,6 +339,16 @@ class ReceiptsStore(SQLBaseStore): def insert_graph_receipt_txn(self, txn, room_id, receipt_type, user_id, event_ids, data): + txn.call_after( + self.get_receipts_for_room.invalidate, (room_id, receipt_type) + ) + txn.call_after( + self.get_receipts_for_user.invalidate, (user_id, receipt_type) + ) + # FIXME: This shouldn't invalidate the whole cache + txn.call_after(self.get_linearized_receipts_for_room.invalidate_all) + + self._simple_delete_txn( txn, table="receipts_graph", -- cgit 1.4.1 From d4315bbf6bf2f7ab979761df10ad1d32d07a89fa Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 20 Jan 2016 15:33:27 +0000 Subject: Add index by user id on receipts_linearized --- .../storage/schema/delta/28/receipts_user_id_index.sql | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 synapse/storage/schema/delta/28/receipts_user_id_index.sql (limited to 'synapse/storage') diff --git a/synapse/storage/schema/delta/28/receipts_user_id_index.sql b/synapse/storage/schema/delta/28/receipts_user_id_index.sql new file mode 100644 index 0000000000..452a1b3c6c --- /dev/null +++ b/synapse/storage/schema/delta/28/receipts_user_id_index.sql @@ -0,0 +1,18 @@ +/* Copyright 2015, 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE INDEX receipts_linearized_user ON receipts_linearized( + user_id +); -- cgit 1.4.1 From 367cfab4e633c892e0d662e3abcd8e1a9c7f2daf Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 20 Jan 2016 16:05:09 +0000 Subject: peppate --- synapse/storage/receipts.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 018140f47a..c4232bdc65 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -57,7 +57,7 @@ class ReceiptsStore(SQLBaseStore): return txn.fetchall() defer.returnValue(dict( - (yield self.runInteraction("get_receipts_for_user", f)) + (yield self.runInteraction("get_receipts_for_user", f)) )) @defer.inlineCallbacks @@ -212,7 +212,7 @@ class ReceiptsStore(SQLBaseStore): def insert_linearized_receipt_txn(self, txn, room_id, receipt_type, user_id, event_id, data, stream_id): txn.call_after( - self.get_receipts_for_room.invalidate, (room_id, receipt_type) + self.get_receipts_for_room.invalidate, (room_id, receipt_type) ) txn.call_after( self.get_receipts_for_user.invalidate, (user_id, receipt_type) @@ -348,7 +348,6 @@ class ReceiptsStore(SQLBaseStore): # FIXME: This shouldn't invalidate the whole cache txn.call_after(self.get_linearized_receipts_for_room.invalidate_all) - self._simple_delete_txn( txn, table="receipts_graph", -- cgit 1.4.1 From 8f66fe639211988f66778d6b8a40a2a5fd2cfaa1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 21 Jan 2016 15:02:07 +0000 Subject: Cache get_unread_event_push_actions_by_room_for_user --- synapse/storage/event_push_actions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 6b7cebc9ce..aa61cf5569 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -15,6 +15,7 @@ from ._base import SQLBaseStore from twisted.internet import defer +from synapse.util.caches.descriptors import cachedInlineCallbacks import logging import ujson as json @@ -46,7 +47,7 @@ class EventPushActionsStore(SQLBaseStore): values ) - @defer.inlineCallbacks + @cachedInlineCallbacks(num_args=3) def get_unread_event_push_actions_by_room_for_user( self, room_id, user_id, last_read_event_id ): -- cgit 1.4.1 From 42eae4634f4bab5649298a65889a4b1a3149d586 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 22 Jan 2016 11:22:48 +0000 Subject: Use new invalidate_many cache invalidation to invalidate the event_push_actions cache appropriately. 
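The cache being invalidated here is keyed by tuples of (room_id, user_id,
last_read_event_id), stored tree-wise so that invalidating a key prefix such
as (event.room_id, uid) or (room_id,) evicts every entry beneath it. A rough
sketch of that behaviour, using a hypothetical PrefixCache in place of the
real TreeCache (illustrative only, not the actual implementation):

    # Nested dicts keyed by tuple parts; popping a prefix drops the whole
    # subtree, which is what invalidate_many relies on.
    class PrefixCache:
        def __init__(self):
            self.root = {}

        def set(self, key, value):
            node = self.root
            for part in key[:-1]:
                node = node.setdefault(part, {})
            node[key[-1]] = value

        def invalidate_many(self, prefix):
            node = self.root
            for part in prefix[:-1]:
                node = node.get(part, {})
            node.pop(prefix[-1], None)

    cache = PrefixCache()
    cache.set(("!room:a", "@alice:a", "$ev1"), ["notify"])
    cache.set(("!room:a", "@bob:a", "$ev2"), ["notify"])
    cache.invalidate_many(("!room:a",))  # evicts both users' entries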
--- synapse/storage/event_push_actions.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index aa61cf5569..6a212c630b 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -40,14 +40,20 @@ class EventPushActionsStore(SQLBaseStore): 'actions': json.dumps(actions) }) + def f(txn): + for uid, _, __ in tuples: + txn.call_after( + self.get_unread_event_push_actions_by_room_for_user.invalidate_many, + (event.room_id, uid) + ) + return self._simple_insert_many_txn(txn, "event_push_actions", values) + yield self.runInteraction( "set_actions_for_event_and_users", - self._simple_insert_many_txn, - "event_push_actions", - values + f, ) - @cachedInlineCallbacks(num_args=3) + @cachedInlineCallbacks(num_args=3, lru=True) def get_unread_event_push_actions_by_room_for_user( self, room_id, user_id, last_read_event_id ): @@ -98,6 +104,11 @@ class EventPushActionsStore(SQLBaseStore): @defer.inlineCallbacks def remove_push_actions_for_event_id(self, room_id, event_id): def f(txn): + # Sad that we have to blow away the cache for the whole room here + txn.call_after( + self.get_unread_event_push_actions_by_room_for_user.invalidate_many, + (room_id,) + ) txn.execute( "DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?", (room_id, event_id) -- cgit 1.4.1 From 10f76dc5da47c49a4191d8113b3c0615224eb9fd Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 22 Jan 2016 12:10:33 +0000 Subject: Make LRU cache not default to treecache & add options to use it --- synapse/storage/event_push_actions.py | 2 +- synapse/util/caches/descriptors.py | 20 ++++++++++++++------ synapse/util/caches/lrucache.py | 9 +++++---- tests/util/test_lrucache.py | 3 ++- 4 files changed, 22 insertions(+), 12 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 6a212c630b..a05c4f84cf 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -53,7 +53,7 @@ class EventPushActionsStore(SQLBaseStore): f, ) - @cachedInlineCallbacks(num_args=3, lru=True) + @cachedInlineCallbacks(num_args=3, lru=True, tree=True) def get_unread_event_push_actions_by_room_for_user( self, room_id, user_id, last_read_event_id ): diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index f4a2b4e590..88e56e3302 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -17,6 +17,7 @@ import logging from synapse.util.async import ObservableDeferred from synapse.util import unwrapFirstError from synapse.util.caches.lrucache import LruCache +from synapse.util.caches.treecache import TreeCache from . import caches_by_name, DEBUG_CACHES, cache_counter @@ -36,9 +37,12 @@ _CacheSentinel = object() class Cache(object): - def __init__(self, name, max_entries=1000, keylen=1, lru=True): + def __init__(self, name, max_entries=1000, keylen=1, lru=True, tree=False): if lru: - self.cache = LruCache(max_size=max_entries, keylen=keylen) + cache_type = TreeCache if tree else dict + self.cache = LruCache( + max_size=max_entries, keylen=keylen, cache_type=cache_type + ) self.max_entries = None else: self.cache = OrderedDict() @@ -131,7 +135,7 @@ class CacheDescriptor(object): which can be used to insert values into the cache specifically, without calling the calculation function. 
""" - def __init__(self, orig, max_entries=1000, num_args=1, lru=True, + def __init__(self, orig, max_entries=1000, num_args=1, lru=True, tree=False, inlineCallbacks=False): self.orig = orig @@ -143,6 +147,7 @@ class CacheDescriptor(object): self.max_entries = max_entries self.num_args = num_args self.lru = lru + self.tree = tree self.arg_names = inspect.getargspec(orig).args[1:num_args+1] @@ -158,6 +163,7 @@ class CacheDescriptor(object): max_entries=self.max_entries, keylen=self.num_args, lru=self.lru, + tree=self.tree, ) def __get__(self, obj, objtype=None): @@ -331,21 +337,23 @@ class CacheListDescriptor(object): return wrapped -def cached(max_entries=1000, num_args=1, lru=True): +def cached(max_entries=1000, num_args=1, lru=True, tree=False): return lambda orig: CacheDescriptor( orig, max_entries=max_entries, num_args=num_args, - lru=lru + lru=lru, + tree=tree, ) -def cachedInlineCallbacks(max_entries=1000, num_args=1, lru=False): +def cachedInlineCallbacks(max_entries=1000, num_args=1, lru=False, tree=False): return lambda orig: CacheDescriptor( orig, max_entries=max_entries, num_args=num_args, lru=lru, + tree=tree, inlineCallbacks=True, ) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 0feceb298a..23e86ec110 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -17,8 +17,6 @@ from functools import wraps import threading -from synapse.util.caches.treecache import TreeCache - def enumerate_leaves(node, depth): if depth == 0: @@ -31,8 +29,8 @@ def enumerate_leaves(node, depth): class LruCache(object): """Least-recently-used cache.""" - def __init__(self, max_size, keylen): - cache = TreeCache() + def __init__(self, max_size, keylen, cache_type=dict): + cache = cache_type() self.size = 0 list_root = [] list_root[:] = [list_root, list_root, None, None] @@ -124,6 +122,9 @@ class LruCache(object): @synchronized def cache_del_multi(key): + """ + This will only work if constructed with cache_type=TreeCache + """ popped = cache.pop(key) if popped is None: return diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index fca2e98983..bcad1d4258 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -17,6 +17,7 @@ from .. import unittest from synapse.util.caches.lrucache import LruCache +from synapse.util.caches.treecache import TreeCache class LruCacheTestCase(unittest.TestCase): @@ -54,7 +55,7 @@ class LruCacheTestCase(unittest.TestCase): self.assertEquals(cache.pop(("key",)), None) def test_del_multi(self): - cache = LruCache(4, 2) + cache = LruCache(4, 2, cache_type=TreeCache) cache[("animal", "cat")] = "mew" cache[("animal", "dog")] = "woof" cache[("vehicles", "car")] = "vroom" -- cgit 1.4.1 From 52bdd1b8342e0bc20739d277c3c42bddbac77891 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 22 Jan 2016 14:58:19 +0000 Subject: Overlay the push_rules_enabled map for users, otherwise they won't be able to disable server default rules. 
--- synapse/push/bulk_push_rule_evaluator.py | 14 ++++++++++++++ synapse/storage/push_rule.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index b91c165e2b..baa32a4e18 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -36,6 +36,7 @@ def decode_rule_json(rule): @defer.inlineCallbacks def _get_rules(room_id, user_ids, store): rules_by_user = yield store.bulk_get_push_rules(user_ids) + rules_enabled_by_user = yield store.bulk_get_push_rules_enabled(user_ids) rules_by_user = { uid: baserules.list_with_base_rules([ @@ -44,6 +45,19 @@ def _get_rules(room_id, user_ids, store): ]) for uid in user_ids } + + # We apply the rules-enabled map here: bulk_get_push_rules doesn't + # fetch disabled rules, but this won't account for any server default + # rules the user has disabled, so we need to do this too. + for uid in user_ids: + user_enabled_map = rules_enabled_by_user[uid] + + for rule in rules_by_user[uid]: + rule_id = rule['rule_id'] + + if rule_id in user_enabled_map: + rule['enabled'] = user_enabled_map[rule_id] + defer.returnValue(rules_by_user) diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 2adfefd994..35ec7e8cef 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -93,6 +93,35 @@ class PushRuleStore(SQLBaseStore): results.setdefault(row['user_name'], []).append(row) defer.returnValue(results) + @defer.inlineCallbacks + def bulk_get_push_rules_enabled(self, user_ids): + if not user_ids: + defer.returnValue({}) + + batch_size = 100 + + def f(txn, user_ids_to_fetch): + sql = ( + "SELECT user_name, rule_id, enabled" + " FROM push_rules_enable" + " WHERE user_name" + " IN (" + ",".join("?" for _ in user_ids_to_fetch) + ")" + ) + txn.execute(sql, user_ids_to_fetch) + return self.cursor_to_dict(txn) + + results = {} + + chunks = [user_ids[i:i+batch_size] for i in xrange(0, len(user_ids), batch_size)] + for batch_user_ids in chunks: + rows = yield self.runInteraction( + "bulk_get_push_rules_enabled", f, batch_user_ids + ) + + for row in rows: + results.setdefault(row['user_name'], {})[row['rule_id']] = row['enabled'] + defer.returnValue(results) + @defer.inlineCallbacks def add_push_rule(self, before, after, **kwargs): vals = kwargs -- cgit 1.4.1 From ddd25def01e3909a34c52954100763bb2a91f648 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 13:36:02 +0000 Subject: Implement a _simple_select_many_batch --- synapse/storage/_base.py | 67 ++++++++++++++++++++++++++++++++++++++++++++ synapse/storage/presence.py | 35 ++++++++++++----------- synapse/storage/push_rule.py | 63 +++++++++++++---------------------------- 3 files changed, 105 insertions(+), 60 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 183a752387..897c5d8d73 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -629,6 +629,73 @@ class SQLBaseStore(object): return self.cursor_to_dict(txn) + @defer.inlineCallbacks + def _simple_select_many_batch(self, table, column, iterable, retcols, + keyvalues={}, desc="_simple_select_many_batch", + batch_size=100): + """Executes a SELECT query on the named table, which may return zero or + more rows, returning the result as a list of dicts. + + Filters rows by if value of `column` is in `iterable`. 
+ + Args: + txn : Transaction object + table : string giving the table name + column : column name to test for inclusion against `iterable` + iterable : list + keyvalues : dict of column names and values to select the rows with + retcols : list of strings giving the names of the columns to return + """ + results = [] + + chunks = [iterable[i:i+batch_size] for i in xrange(0, len(iterable), batch_size)] + for chunk in chunks: + rows = yield self.runInteraction( + desc, + self._simple_select_many_txn, + table, column, chunk, keyvalues, retcols + ) + + results.extend(rows) + + defer.returnValue(results) + + def _simple_select_many_txn(self, txn, table, column, iterable, keyvalues, retcols): + """Executes a SELECT query on the named table, which may return zero or + more rows, returning the result as a list of dicts. + + Filters rows by if value of `column` is in `iterable`. + + Args: + txn : Transaction object + table : string giving the table name + column : column name to test for inclusion against `iterable` + iterable : list + keyvalues : dict of column names and values to select the rows with + retcols : list of strings giving the names of the columns to return + """ + sql = "SELECT %s FROM %s" % (", ".join(retcols), table) + + clauses = [] + values = [] + clauses.append( + "%s IN (%s)" % (column, ",".join("?" for _ in iterable)) + ) + values.extend(iterable) + + for key, value in keyvalues.items(): + clauses.append("%s = ?" % (key,)) + values.append(value) + + if clauses: + sql = "%s WHERE %s" % ( + sql, + " AND ".join(clauses), + ) + + txn.execute(sql, values) + return self.cursor_to_dict(txn) + def _simple_update_one(self, table, keyvalues, updatevalues, desc="_simple_update_one"): """Executes an UPDATE query on the named table, setting new values for diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 1095d52ace..9b3aecaf8c 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -48,24 +48,25 @@ class PresenceStore(SQLBaseStore): desc="get_presence_state", ) - @cachedList(get_presence_state.cache, list_name="user_localparts") + @cachedList(get_presence_state.cache, list_name="user_localparts", + inlineCallbacks=True) def get_presence_states(self, user_localparts): - def f(txn): - results = {} - for user_localpart in user_localparts: - res = self._simple_select_one_txn( - txn, - table="presence", - keyvalues={"user_id": user_localpart}, - retcols=["state", "status_msg", "mtime"], - allow_none=True, - ) - if res: - results[user_localpart] = res - - return results - - return self.runInteraction("get_presence_states", f) + rows = yield self._simple_select_many_batch( + table="presence", + column="user_id", + iterable=user_localparts, + retcols=("user_id", "state", "status_msg", "mtime",), + desc="get_presence_states", + ) + + defer.returnValue({ + row["user_id"]: { + "state": row["state"], + "status_msg": row["status_msg"], + "mtime": row["mtime"], + } + for row in rows + }) def set_presence_state(self, user_localpart, new_state): res = self._simple_update_one( diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 35ec7e8cef..1f51c90ee5 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -65,32 +65,20 @@ class PushRuleStore(SQLBaseStore): if not user_ids: defer.returnValue({}) - batch_size = 100 - - def f(txn, user_ids_to_fetch): - sql = ( - "SELECT pr.*" - " FROM push_rules AS pr" - " LEFT JOIN push_rules_enable AS pre" - " ON pr.user_name = pre.user_name AND pr.rule_id = pre.rule_id" - " 
WHERE pr.user_name" - " IN (" + ",".join("?" for _ in user_ids_to_fetch) + ")" - " AND (pre.enabled IS NULL OR pre.enabled = 1)" - " ORDER BY pr.user_name, pr.priority_class DESC, pr.priority DESC" - ) - txn.execute(sql, user_ids_to_fetch) - return self.cursor_to_dict(txn) - results = {} - chunks = [user_ids[i:i+batch_size] for i in xrange(0, len(user_ids), batch_size)] - for batch_user_ids in chunks: - rows = yield self.runInteraction( - "bulk_get_push_rules", f, batch_user_ids - ) + rows = yield self._simple_select_many_batch( + table="push_rules", + column="user_name", + iterable=user_ids, + retcols=("*",), + desc="bulk_get_push_rules", + ) + + rows.sort(key=lambda e: (-e["priority_class"], -e["priority"])) - for row in rows: - results.setdefault(row['user_name'], []).append(row) + for row in rows: + results.setdefault(row['user_name'], []).append(row) defer.returnValue(results) @defer.inlineCallbacks @@ -98,28 +86,17 @@ class PushRuleStore(SQLBaseStore): if not user_ids: defer.returnValue({}) - batch_size = 100 - - def f(txn, user_ids_to_fetch): - sql = ( - "SELECT user_name, rule_id, enabled" - " FROM push_rules_enable" - " WHERE user_name" - " IN (" + ",".join("?" for _ in user_ids_to_fetch) + ")" - ) - txn.execute(sql, user_ids_to_fetch) - return self.cursor_to_dict(txn) - results = {} - chunks = [user_ids[i:i+batch_size] for i in xrange(0, len(user_ids), batch_size)] - for batch_user_ids in chunks: - rows = yield self.runInteraction( - "bulk_get_push_rules_enabled", f, batch_user_ids - ) - - for row in rows: - results.setdefault(row['user_name'], {})[row['rule_id']] = row['enabled'] + rows = yield self._simple_select_many_batch( + table="push_rules_enable", + column="user_name", + iterable=user_ids, + retcols=("user_name", "rule_id", "enabled",), + desc="bulk_get_push_rules_enabled", + ) + for row in rows: + results.setdefault(row['user_name'], {})[row['rule_id']] = row['enabled'] defer.returnValue(results) @defer.inlineCallbacks -- cgit 1.4.1 From 53cb17366391a13e8d8297c9f4fb3f77fd6b85b7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 13:53:05 +0000 Subject: Push: Use storage apis that are cached --- synapse/push/__init__.py | 30 +++++++++++++----------------- synapse/storage/roommember.py | 1 + 2 files changed, 14 insertions(+), 17 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index e6a28bd8c0..9bc0b356f4 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -17,7 +17,6 @@ from twisted.internet import defer from synapse.streams.config import PaginationConfig from synapse.types import StreamToken -from synapse.api.constants import Membership import synapse.util.async import push_rule_evaluator as push_rule_evaluator @@ -296,31 +295,28 @@ class Pusher(object): @defer.inlineCallbacks def _get_badge_count(self): - room_list = yield self.store.get_rooms_for_user_where_membership_is( - user_id=self.user_id, - membership_list=(Membership.INVITE, Membership.JOIN) - ) + invites, joins = yield defer.gatherResults([ + self.store.get_invites_for_user(self.user_id), + self.store.get_rooms_for_user(self.user_id), + ], consumeErrors=True) my_receipts_by_room = yield self.store.get_receipts_for_user( self.user_id, "m.read", ) - badge = 0 + badge = len(invites) - for r in room_list: - if r.membership == Membership.INVITE: - badge += 1 - else: - if r.room_id in my_receipts_by_room: - last_unread_event_id = my_receipts_by_room[r.room_id] + for r in joins: + if r.room_id in my_receipts_by_room: + 
last_unread_event_id = my_receipts_by_room[r.room_id] - notifs = yield ( - self.store.get_unread_event_push_actions_by_room_for_user( - r.room_id, self.user_id, last_unread_event_id - ) + notifs = yield ( + self.store.get_unread_event_push_actions_by_room_for_user( + r.room_id, self.user_id, last_unread_event_id ) - badge += len(notifs) + ) + badge += len(notifs) defer.returnValue(badge) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 68ac88905f..edfecced05 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -110,6 +110,7 @@ class RoomMemberStore(SQLBaseStore): membership=membership, ).addCallback(self._get_events) + @cached() def get_invites_for_user(self, user_id): """ Get all the invite events for a user Args: -- cgit 1.4.1 From 86896408b0a3c0d0c82356c8cc0bdaf3fe236b45 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 15:30:32 +0000 Subject: Add index to event_push_actions --- synapse/storage/schema/delta/28/event_push_actions.sql | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse/storage') diff --git a/synapse/storage/schema/delta/28/event_push_actions.sql b/synapse/storage/schema/delta/28/event_push_actions.sql index bdf6ae3f24..4d519849df 100644 --- a/synapse/storage/schema/delta/28/event_push_actions.sql +++ b/synapse/storage/schema/delta/28/event_push_actions.sql @@ -24,3 +24,4 @@ CREATE TABLE IF NOT EXISTS event_push_actions( CREATE INDEX event_push_actions_room_id_event_id_user_id_profile_tag on event_push_actions(room_id, event_id, user_id, profile_tag); +CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id); -- cgit 1.4.1 From 1ebf5e3d03a2a2ce9ff278b2eac07acc0f7cde66 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 15:53:36 +0000 Subject: Correct docstring --- synapse/storage/_base.py | 1 - 1 file changed, 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 897c5d8d73..304ebdc825 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -639,7 +639,6 @@ class SQLBaseStore(object): Filters rows by if value of `column` is in `iterable`. Args: - txn : Transaction object table : string giving the table name column : column name to test for inclusion against `iterable` iterable : list -- cgit 1.4.1 From aea5da0ef6f4d3907ace2c1fdba743312118660c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 15:59:29 +0000 Subject: Guard against empty iterables --- synapse/storage/_base.py | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 304ebdc825..90d7aee94a 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -647,6 +647,9 @@ class SQLBaseStore(object): """ results = [] + if not iterable: + defer.returnValue(results) + chunks = [iterable[i:i+batch_size] for i in xrange(0, len(iterable), batch_size)] for chunk in chunks: rows = yield self.runInteraction( @@ -673,6 +676,9 @@ class SQLBaseStore(object): keyvalues : dict of column names and values to select the rows with retcols : list of strings giving the names of the columns to return """ + if not iterable: + return [] + sql = "SELECT %s FROM %s" % (", ".join(retcols), table) clauses = [] -- cgit 1.4.1 From 87f9477b105b4e8216d1df186492ec6d9872967f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Jan 2016 15:51:06 +0000 Subject: Add a Homeserver.setup method. 
This is for setting up dependencies that require work on startup. This is
useful for the DataStore that wants to read a bunch from the database before
initializing.
---
 synapse/app/homeserver.py             | 33 ++++++++++++++---
 synapse/server.py                     | 32 ++++++++++++-----------
 synapse/storage/__init__.py           | 45 +++++++++++++++++++++++++++++---
 synapse/storage/_base.py              | 49 ++++++++++++++++-------------------
 synapse/storage/events.py             | 14 ++++------
 synapse/storage/receipts.py           |  8 +++---
 synapse/storage/stream.py             | 13 ----------
 synapse/storage/tags.py               |  7 -----
 synapse/storage/util/id_generators.py | 36 +++++++------------------
 9 files changed, 121 insertions(+), 116 deletions(-)

(limited to 'synapse/storage')

diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 795c655ae3..fb76be58a2 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -254,6 +254,17 @@ class SynapseHomeServer(HomeServer):
         except IncorrectDatabaseSetup as e:
             quit_with_error(e.message)

+    def get_db_conn(self):
+        db_conn = self.database_engine.module.connect(
+            **{
+                k: v for k, v in self.db_config.get("args", {}).items()
+                if not k.startswith("cp_")
+            }
+        )
+
+        self.database_engine.on_new_connection(db_conn)
+        return db_conn
+

 def quit_with_error(error_string):
     message_lines = error_string.split("\n")
@@ -390,13 +401,7 @@ def setup(config_options):
     logger.info("Preparing database: %s...", config.database_config['name'])

     try:
-        db_conn = database_engine.module.connect(
-            **{
-                k: v for k, v in config.database_config.get("args", {}).items()
-                if not k.startswith("cp_")
-            }
-        )
-
+        db_conn = hs.get_db_conn()
         database_engine.prepare_database(db_conn)
         hs.run_startup_checks(db_conn, database_engine)

@@ -411,13 +416,17 @@ def setup(config_options):
     logger.info("Database prepared in %s.", config.database_config['name'])

+    hs.setup()
     hs.start_listening()

-    hs.get_pusherpool().start()
-    hs.get_state_handler().start_caching()
-    hs.get_datastore().start_profiling()
-    hs.get_datastore().start_doing_background_updates()
-    hs.get_replication_layer().start_get_pdu_cache()
+    def start():
+        hs.get_pusherpool().start()
+        hs.get_state_handler().start_caching()
+        hs.get_datastore().start_profiling()
+        hs.get_datastore().start_doing_background_updates()
+        hs.get_replication_layer().start_get_pdu_cache()
+
+    reactor.callWhenRunning(start)

     return hs

diff --git a/synapse/server.py b/synapse/server.py
index a59e46ca2d..006e91b37c 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -21,6 +21,7 @@
 # Imports required for the default HomeServer() implementation
 from twisted.web.client import BrowserLikePolicyForHTTPS
 from twisted.enterprise import adbapi
+from twisted.internet import defer

 from synapse.federation import initialize_http_replication
 from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory
@@ -28,7 +29,7 @@ from synapse.notifier import Notifier
 from synapse.api.auth import Auth
 from synapse.handlers import Handlers
 from synapse.state import StateHandler
-from synapse.storage import DataStore
+from synapse.storage import get_datastore
 from synapse.util import Clock
 from synapse.util.distributor import Distributor
 from synapse.streams.events import EventSources
@@ -40,6 +41,11 @@ from synapse.api.filtering import Filtering

 from synapse.http.matrixfederationclient import MatrixFederationHttpClient

+import logging
+
+
+logger = logging.getLogger(__name__)
+

 class HomeServer(object):
     """A basic homeserver object without lazy component builders.
@@ -102,10 +108,19 @@ class HomeServer(object): self.hostname = hostname self._building = {} + self.clock = Clock() + self.distributor = Distributor() + self.ratelimiter = Ratelimiter() + # Other kwargs are explicit dependencies for depname in kwargs: setattr(self, depname, kwargs[depname]) + def setup(self): + logger.info("Setting up.") + self.datastore = get_datastore(self) + logger.info("Finished setting up.") + def get_ip_from_request(self, request): # X-Forwarded-For is handled by our custom request type. return request.getClientIP() @@ -116,15 +131,9 @@ class HomeServer(object): def is_mine_id(self, string): return string.split(":", 1)[1] == self.hostname - def build_clock(self): - return Clock() - def build_replication_layer(self): return initialize_http_replication(self) - def build_datastore(self): - return DataStore(self) - def build_handlers(self): return Handlers(self) @@ -135,10 +144,9 @@ class HomeServer(object): return Auth(self) def build_http_client_context_factory(self): - config = self.get_config() return ( InsecureInterceptableContextFactory() - if config.use_insecure_ssl_client_just_for_testing_do_not_use + if self.config.use_insecure_ssl_client_just_for_testing_do_not_use else BrowserLikePolicyForHTTPS() ) @@ -157,15 +165,9 @@ class HomeServer(object): def build_state_handler(self): return StateHandler(self) - def build_distributor(self): - return Distributor() - def build_event_sources(self): return EventSources(self) - def build_ratelimiter(self): - return Ratelimiter() - def build_keyring(self): return Keyring(self) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 7a3f6c4662..c8cab45f77 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -46,6 +46,9 @@ from .tags import TagsStore from .account_data import AccountDataStore +from util.id_generators import IdGenerator, StreamIdGenerator + + import logging @@ -58,6 +61,22 @@ logger = logging.getLogger(__name__) LAST_SEEN_GRANULARITY = 120*1000 +def get_datastore(hs): + logger.info("getting called!") + + conn = hs.get_db_conn() + try: + cur = conn.cursor() + cur.execute("SELECT MIN(stream_ordering) FROM events",) + rows = cur.fetchall() + min_token = rows[0][0] if rows and rows[0] and rows[0][0] else -1 + min_token = min(min_token, -1) + + return DataStore(conn, hs, min_token) + finally: + conn.close() + + class DataStore(RoomMemberStore, RoomStore, RegistrationStore, StreamStore, ProfileStore, PresenceStore, TransactionStore, @@ -79,18 +98,36 @@ class DataStore(RoomMemberStore, RoomStore, EventPushActionsStore ): - def __init__(self, hs): - super(DataStore, self).__init__(hs) + def __init__(self, db_conn, hs, min_stream_token): self.hs = hs - self.min_token_deferred = self._get_min_token() - self.min_token = None + self.min_stream_token = min_stream_token self.client_ip_last_seen = Cache( name="client_ip_last_seen", keylen=4, ) + self._stream_id_gen = StreamIdGenerator( + db_conn, "events", "stream_ordering" + ) + self._receipts_id_gen = StreamIdGenerator( + db_conn, "receipts_linearized", "stream_id" + ) + self._account_data_id_gen = StreamIdGenerator( + db_conn, "account_data_max_stream_id", "stream_id" + ) + + self._transaction_id_gen = IdGenerator("sent_transactions", "id", self) + self._state_groups_id_gen = IdGenerator("state_groups", "id", self) + self._access_tokens_id_gen = IdGenerator("access_tokens", "id", self) + self._refresh_tokens_id_gen = IdGenerator("refresh_tokens", "id", self) + self._pushers_id_gen = IdGenerator("pushers", "id", self) + 
self._push_rule_id_gen = IdGenerator("push_rules", "id", self) + self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self) + + super(DataStore, self).__init__(hs) + @defer.inlineCallbacks def insert_client_ip(self, user, access_token, ip, user_agent): now = int(self._clock.time_msec()) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 90d7aee94a..5e77320540 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -15,13 +15,11 @@ import logging from synapse.api.errors import StoreError -from synapse.util.logutils import log_function from synapse.util.logcontext import preserve_context_over_fn, LoggingContext from synapse.util.caches.dictionary_cache import DictionaryCache from synapse.util.caches.descriptors import Cache import synapse.metrics -from util.id_generators import IdGenerator, StreamIdGenerator from twisted.internet import defer @@ -175,16 +173,6 @@ class SQLBaseStore(object): self.database_engine = hs.database_engine - self._stream_id_gen = StreamIdGenerator("events", "stream_ordering") - self._transaction_id_gen = IdGenerator("sent_transactions", "id", self) - self._state_groups_id_gen = IdGenerator("state_groups", "id", self) - self._access_tokens_id_gen = IdGenerator("access_tokens", "id", self) - self._refresh_tokens_id_gen = IdGenerator("refresh_tokens", "id", self) - self._pushers_id_gen = IdGenerator("pushers", "id", self) - self._push_rule_id_gen = IdGenerator("push_rules", "id", self) - self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self) - self._receipts_id_gen = StreamIdGenerator("receipts_linearized", "stream_id") - def start_profiling(self): self._previous_loop_ts = self._clock.time_msec() @@ -345,7 +333,8 @@ class SQLBaseStore(object): defer.returnValue(result) - def cursor_to_dict(self, cursor): + @staticmethod + def cursor_to_dict(cursor): """Converts a SQL cursor into an list of dicts. Args: @@ -402,8 +391,8 @@ class SQLBaseStore(object): if not or_ignore: raise - @log_function - def _simple_insert_txn(self, txn, table, values): + @staticmethod + def _simple_insert_txn(txn, table, values): keys, vals = zip(*values.items()) sql = "INSERT INTO %s (%s) VALUES(%s)" % ( @@ -414,7 +403,8 @@ class SQLBaseStore(object): txn.execute(sql, vals) - def _simple_insert_many_txn(self, txn, table, values): + @staticmethod + def _simple_insert_many_txn(txn, table, values): if not values: return @@ -537,9 +527,10 @@ class SQLBaseStore(object): table, keyvalues, retcol, allow_none=allow_none, ) - def _simple_select_one_onecol_txn(self, txn, table, keyvalues, retcol, + @classmethod + def _simple_select_one_onecol_txn(cls, txn, table, keyvalues, retcol, allow_none=False): - ret = self._simple_select_onecol_txn( + ret = cls._simple_select_onecol_txn( txn, table=table, keyvalues=keyvalues, @@ -554,7 +545,8 @@ class SQLBaseStore(object): else: raise StoreError(404, "No row found") - def _simple_select_onecol_txn(self, txn, table, keyvalues, retcol): + @staticmethod + def _simple_select_onecol_txn(txn, table, keyvalues, retcol): sql = ( "SELECT %(retcol)s FROM %(table)s WHERE %(where)s" ) % { @@ -603,7 +595,8 @@ class SQLBaseStore(object): table, keyvalues, retcols ) - def _simple_select_list_txn(self, txn, table, keyvalues, retcols): + @classmethod + def _simple_select_list_txn(cls, txn, table, keyvalues, retcols): """Executes a SELECT query on the named table, which may return zero or more rows, returning the result as a list of dicts. 
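A short sketch of what the staticmethod/classmethod conversions in this commit buy: the _simple_*_txn helpers become callable with nothing but a cursor, no store instance required (sqlite3 stands in for the real database engine; the harness around the helper is illustrative):

import sqlite3

class SQLBaseStore(object):
    @staticmethod
    def _simple_insert_txn(txn, table, values):
        # Body as in the patch: build a parameterised INSERT from a dict.
        keys, vals = zip(*values.items())
        sql = "INSERT INTO %s (%s) VALUES(%s)" % (
            table,
            ", ".join(k for k in keys),
            ", ".join("?" for _ in keys)
        )
        txn.execute(sql, vals)

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (name TEXT)")
txn = conn.cursor()
# No store instance needed -- any code holding a cursor can reuse it.
SQLBaseStore._simple_insert_txn(txn, "users", {"name": "@alice:hs"})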
@@ -627,7 +620,7 @@ class SQLBaseStore(object): ) txn.execute(sql) - return self.cursor_to_dict(txn) + return cls.cursor_to_dict(txn) @defer.inlineCallbacks def _simple_select_many_batch(self, table, column, iterable, retcols, @@ -662,7 +655,8 @@ class SQLBaseStore(object): defer.returnValue(results) - def _simple_select_many_txn(self, txn, table, column, iterable, keyvalues, retcols): + @classmethod + def _simple_select_many_txn(cls, txn, table, column, iterable, keyvalues, retcols): """Executes a SELECT query on the named table, which may return zero or more rows, returning the result as a list of dicts. @@ -699,7 +693,7 @@ class SQLBaseStore(object): ) txn.execute(sql, values) - return self.cursor_to_dict(txn) + return cls.cursor_to_dict(txn) def _simple_update_one(self, table, keyvalues, updatevalues, desc="_simple_update_one"): @@ -726,7 +720,8 @@ class SQLBaseStore(object): table, keyvalues, updatevalues, ) - def _simple_update_one_txn(self, txn, table, keyvalues, updatevalues): + @staticmethod + def _simple_update_one_txn(txn, table, keyvalues, updatevalues): update_sql = "UPDATE %s SET %s WHERE %s" % ( table, ", ".join("%s = ?" % (k,) for k in updatevalues), @@ -743,7 +738,8 @@ class SQLBaseStore(object): if txn.rowcount > 1: raise StoreError(500, "More than one row matched") - def _simple_select_one_txn(self, txn, table, keyvalues, retcols, + @staticmethod + def _simple_select_one_txn(txn, table, keyvalues, retcols, allow_none=False): select_sql = "SELECT %s FROM %s WHERE %s" % ( ", ".join(retcols), @@ -784,7 +780,8 @@ class SQLBaseStore(object): raise StoreError(500, "more than one row matched") return self.runInteraction(desc, func) - def _simple_delete_txn(self, txn, table, keyvalues): + @staticmethod + def _simple_delete_txn(txn, table, keyvalues): sql = "DELETE FROM %s WHERE %s" % ( table, " AND ".join("%s = ?" 
% (k, ) for k in keyvalues) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index ba368a3eca..298cb9bada 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -66,11 +66,9 @@ class EventsStore(SQLBaseStore): return if backfilled: - if not self.min_token_deferred.called: - yield self.min_token_deferred - start = self.min_token - 1 - self.min_token -= len(events_and_contexts) + 1 - stream_orderings = range(start, self.min_token, -1) + start = self.min_stream_token - 1 + self.min_stream_token -= len(events_and_contexts) + 1 + stream_orderings = range(start, self.min_stream_token, -1) @contextmanager def stream_ordering_manager(): @@ -107,10 +105,8 @@ class EventsStore(SQLBaseStore): is_new_state=True, current_state=None): stream_ordering = None if backfilled: - if not self.min_token_deferred.called: - yield self.min_token_deferred - self.min_token -= 1 - stream_ordering = self.min_token + self.min_stream_token -= 1 + stream_ordering = self.min_stream_token if stream_ordering is None: stream_ordering_manager = yield self._stream_id_gen.get_next(self) diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index c4232bdc65..c0593e23ee 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -31,7 +31,9 @@ class ReceiptsStore(SQLBaseStore): def __init__(self, hs): super(ReceiptsStore, self).__init__(hs) - self._receipts_stream_cache = _RoomStreamChangeCache() + self._receipts_stream_cache = _RoomStreamChangeCache( + self._receipts_id_gen.get_max_token(None) + ) @cached(num_args=2) def get_receipts_for_room(self, room_id, receipt_type): @@ -377,11 +379,11 @@ class _RoomStreamChangeCache(object): may have changed since that key. If the key is too old then the cache will simply return all rooms. 
""" - def __init__(self, size_of_cache=10000): + def __init__(self, current_key, size_of_cache=10000): self._size_of_cache = size_of_cache self._room_to_key = {} self._cache = sorteddict() - self._earliest_key = None + self._earliest_key = current_key self.name = "ReceiptsRoomChangeCache" caches_by_name[self.name] = self._cache diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 02b1913e26..e31bad258a 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -444,19 +444,6 @@ class StreamStore(SQLBaseStore): rows = txn.fetchall() return rows[0][0] if rows else 0 - @defer.inlineCallbacks - def _get_min_token(self): - row = yield self._execute( - "_get_min_token", None, "SELECT MIN(stream_ordering) FROM events" - ) - - self.min_token = row[0][0] if row and row[0] and row[0][0] else -1 - self.min_token = min(self.min_token, -1) - - logger.debug("min_token is: %s", self.min_token) - - defer.returnValue(self.min_token) - @staticmethod def _set_before_and_after(events, rows): for event, row in zip(events, rows): diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index ed9c91e5ea..4c39e07cbd 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -16,7 +16,6 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cached from twisted.internet import defer -from .util.id_generators import StreamIdGenerator import ujson as json import logging @@ -25,12 +24,6 @@ logger = logging.getLogger(__name__) class TagsStore(SQLBaseStore): - def __init__(self, hs): - super(TagsStore, self).__init__(hs) - - self._account_data_id_gen = StreamIdGenerator( - "account_data_max_stream_id", "stream_id" - ) def get_max_account_data_stream_id(self): """Get the current max stream id for the private user data stream diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index f58bf7fd2c..5c522f4ab9 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -72,28 +72,24 @@ class StreamIdGenerator(object): with stream_id_gen.get_next_txn(txn) as stream_id: # ... persist event ... """ - def __init__(self, table, column): + def __init__(self, db_conn, table, column): self.table = table self.column = column self._lock = threading.Lock() - self._current_max = None + cur = db_conn.cursor() + self._current_max = self._get_or_compute_current_max(cur) + cur.close() + self._unfinished_ids = deque() - @defer.inlineCallbacks def get_next(self, store): """ Usage: with yield stream_id_gen.get_next as stream_id: # ... persist event ... """ - if not self._current_max: - yield store.runInteraction( - "_compute_current_max", - self._get_or_compute_current_max, - ) - with self._lock: self._current_max += 1 next_id = self._current_max @@ -108,21 +104,14 @@ class StreamIdGenerator(object): with self._lock: self._unfinished_ids.remove(next_id) - defer.returnValue(manager()) + return manager() - @defer.inlineCallbacks def get_next_mult(self, store, n): """ Usage: with yield stream_id_gen.get_next(store, n) as stream_ids: # ... persist events ... 
""" - if not self._current_max: - yield store.runInteraction( - "_compute_current_max", - self._get_or_compute_current_max, - ) - with self._lock: next_ids = range(self._current_max + 1, self._current_max + n + 1) self._current_max += n @@ -139,24 +128,17 @@ class StreamIdGenerator(object): for next_id in next_ids: self._unfinished_ids.remove(next_id) - defer.returnValue(manager()) + return manager() - @defer.inlineCallbacks def get_max_token(self, store): """Returns the maximum stream id such that all stream ids less than or equal to it have been successfully persisted. """ - if not self._current_max: - yield store.runInteraction( - "_compute_current_max", - self._get_or_compute_current_max, - ) - with self._lock: if self._unfinished_ids: - defer.returnValue(self._unfinished_ids[0] - 1) + return self._unfinished_ids[0] - 1 - defer.returnValue(self._current_max) + return self._current_max def _get_or_compute_current_max(self, txn): with self._lock: -- cgit 1.4.1 From 8c94833b72f27d18a57e424a0f4f0c823c3a0aa1 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 27 Jan 2016 10:24:20 +0000 Subject: Fix adding push rules relative to other rules --- synapse/rest/client/v1/push_rule.py | 15 ++++++++++----- synapse/storage/push_rule.py | 3 ++- 2 files changed, 12 insertions(+), 6 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index cb3ec23872..96633a176c 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -66,11 +66,12 @@ class PushRuleRestServlet(ClientV1RestServlet): raise SynapseError(400, e.message) before = request.args.get("before", None) - if before and len(before): - before = before[0] + if before: + before = _namespaced_rule_id(spec, before[0]) + after = request.args.get("after", None) - if after and len(after): - after = after[0] + if after: + after = _namespaced_rule_id(spec, after[0]) try: yield self.hs.get_datastore().add_push_rule( @@ -452,11 +453,15 @@ def _strip_device_condition(rule): def _namespaced_rule_id_from_spec(spec): + return _namespaced_rule_id(spec, spec['rule_id']) + + +def _namespaced_rule_id(spec, rule_id): if spec['scope'] == 'global': scope = 'global' else: scope = 'device/%s' % (spec['profile_tag']) - return "%s/%s/%s" % (scope, spec['template'], spec['rule_id']) + return "%s/%s/%s" % (scope, spec['template'], rule_id) def _rule_id_from_namespaced(in_rule_id): diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 1f51c90ee5..f9a48171ba 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -130,7 +130,8 @@ class PushRuleStore(SQLBaseStore): def _add_push_rule_relative_txn(self, txn, user_id, **kwargs): after = kwargs.pop("after", None) - relative_to_rule = kwargs.pop("before", after) + before = kwargs.pop("before", None) + relative_to_rule = before or after res = self._simple_select_one_txn( txn, -- cgit 1.4.1 From b97f6626b6f9b91498d06a7ae113b9d20f1fc2ef Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 09:54:30 +0000 Subject: Add cache to room stream --- synapse/handlers/sync.py | 42 +++++++--- synapse/storage/events.py | 2 + synapse/storage/receipts.py | 65 +-------------- synapse/storage/stream.py | 133 +++++++++++++++++++++++++++++++ synapse/util/caches/room_change_cache.py | 86 ++++++++++++++++++++ 5 files changed, 254 insertions(+), 74 deletions(-) create mode 100644 synapse/util/caches/room_change_cache.py (limited to 'synapse/storage') diff --git 
a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 328c049b03..1fdf978313 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -514,13 +514,6 @@ class SyncHandler(BaseHandler): timeline_limit = sync_config.filter_collection.timeline_limit() - room_events, _ = yield self.store.get_room_events_stream( - sync_config.user.to_string(), - from_key=since_token.room_key, - to_key=now_token.room_key, - limit=timeline_limit + 1, - ) - tags_by_room = yield self.store.get_updated_tags( sync_config.user.to_string(), since_token.account_data_key, @@ -533,6 +526,32 @@ class SyncHandler(BaseHandler): ) ) + rooms_changed = yield self.store.get_room_changes_for_user( + sync_config.user.to_string(), since_token.room_key, now_token.room_key + ) + + room_to_events = yield self.store.get_room_events_stream_for_rooms( + room_ids=room_ids, + from_key=since_token.room_key, + to_key=now_token.room_key, + limit=timeline_limit + 1, + ) + + room_events = [ + event + for events, _ in room_to_events.values() + for event in events + ] + + room_events.extend(rooms_changed) + + # room_events, _ = yield self.store.get_room_events_stream( + # sync_config.user.to_string(), + # from_key=since_token.room_key, + # to_key=now_token.room_key, + # limit=timeline_limit + 1, + # ) + joined = [] archived = [] if len(room_events) <= timeline_limit: @@ -694,14 +713,12 @@ class SyncHandler(BaseHandler): end_key = room_key while limited and len(recents) < timeline_limit and max_repeat: - events, keys = yield self.store.get_recent_events_for_room( + events, end_key = yield self.store.get_recent_room_events_stream_for_room( room_id, limit=load_limit + 1, - from_token=since_token.room_key if since_token else None, - end_token=end_key, + from_key=since_token.room_key if since_token else None, + to_key=end_key, ) - room_key, _ = keys - end_key = "s" + room_key.split('-')[-1] loaded_recents = sync_config.filter_collection.filter_room_timeline(events) loaded_recents = yield self._filter_events_for_client( sync_config.user.to_string(), @@ -712,6 +729,7 @@ class SyncHandler(BaseHandler): recents = loaded_recents if len(events) <= load_limit: limited = False + break max_repeat -= 1 if len(recents) > timeline_limit: diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 298cb9bada..d96ea3a30e 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -128,6 +128,8 @@ class EventsStore(SQLBaseStore): is_new_state=is_new_state, current_state=current_state, ) + logger.info("Invalidating %r at %r", event.room_id, stream_ordering) + self._events_stream_cache.room_has_changed(None, event.room_id, stream_ordering) except _RollbackButIsFineException: pass diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index c0593e23ee..b7a4e77748 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -16,6 +16,7 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList, cached from synapse.util.caches import cache_counter, caches_by_name +from synapse.util.caches.room_change_cache import RoomStreamChangeCache from twisted.internet import defer @@ -31,8 +32,8 @@ class ReceiptsStore(SQLBaseStore): def __init__(self, hs): super(ReceiptsStore, self).__init__(hs) - self._receipts_stream_cache = _RoomStreamChangeCache( - self._receipts_id_gen.get_max_token(None) + self._receipts_stream_cache = RoomStreamChangeCache( + "ReceiptsRoomChangeCache", self._receipts_id_gen.get_max_token(None) ) @cached(num_args=2) @@ -370,63 
+371,3 @@ class ReceiptsStore(SQLBaseStore): "data": json.dumps(data), } ) - - -class _RoomStreamChangeCache(object): - """Keeps track of the stream_id of the latest change in rooms. - - Given a list of rooms and stream key, it will give a subset of rooms that - may have changed since that key. If the key is too old then the cache - will simply return all rooms. - """ - def __init__(self, current_key, size_of_cache=10000): - self._size_of_cache = size_of_cache - self._room_to_key = {} - self._cache = sorteddict() - self._earliest_key = current_key - self.name = "ReceiptsRoomChangeCache" - caches_by_name[self.name] = self._cache - - @defer.inlineCallbacks - def get_rooms_changed(self, store, room_ids, key): - """Returns subset of room ids that have had new receipts since the - given key. If the key is too old it will just return the given list. - """ - if key > (yield self._get_earliest_key(store)): - keys = self._cache.keys() - i = keys.bisect_right(key) - - result = set( - self._cache[k] for k in keys[i:] - ).intersection(room_ids) - - cache_counter.inc_hits(self.name) - else: - result = room_ids - cache_counter.inc_misses(self.name) - - defer.returnValue(result) - - @defer.inlineCallbacks - def room_has_changed(self, store, room_id, key): - """Informs the cache that the room has been changed at the given key. - """ - if key > (yield self._get_earliest_key(store)): - old_key = self._room_to_key.get(room_id, None) - if old_key: - key = max(key, old_key) - self._cache.pop(old_key, None) - self._cache[key] = room_id - - while len(self._cache) > self._size_of_cache: - k, r = self._cache.popitem() - self._earliest_key = max(k, self._earliest_key) - self._room_to_key.pop(r, None) - - @defer.inlineCallbacks - def _get_earliest_key(self, store): - if self._earliest_key is None: - self._earliest_key = yield store.get_max_receipt_stream_id() - self._earliest_key = int(self._earliest_key) - - defer.returnValue(self._earliest_key) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index e31bad258a..3a32a0019a 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -37,6 +37,7 @@ from twisted.internet import defer from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks +from synapse.util.caches.room_change_cache import RoomStreamChangeCache from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken from synapse.util.logutils import log_function @@ -77,6 +78,12 @@ def upper_bound(token): class StreamStore(SQLBaseStore): + def __init__(self, hs): + super(StreamStore, self).__init__(hs) + + self._events_stream_cache = RoomStreamChangeCache( + "EventsRoomStreamChangeCache", self._stream_id_gen.get_max_token(None) + ) @defer.inlineCallbacks def get_appservice_room_stream(self, service, from_key, to_key, limit=0): @@ -157,6 +164,132 @@ class StreamStore(SQLBaseStore): results = yield self.runInteraction("get_appservice_room_stream", f) defer.returnValue(results) + @defer.inlineCallbacks + def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0): + from_id = RoomStreamToken.parse_stream_token(from_key).stream + + room_ids = yield self._events_stream_cache.get_rooms_changed( + self, room_ids, from_id + ) + + if not room_ids: + defer.returnValue({}) + + results = {} + room_ids = list(room_ids) + for rm_ids in (room_ids[i:i+20] for i in xrange(0, len(room_ids), 20)): + res = yield defer.gatherResults([ + self.get_recent_room_events_stream_for_room( + room_id, from_key, to_key, limit + 
).addCallback(lambda r, rm: (rm, r), room_id) + for room_id in room_ids + ]) + results.update(dict(res)) + + defer.returnValue(results) + + @defer.inlineCallbacks + def get_recent_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0): + if from_key is not None: + from_id = RoomStreamToken.parse_stream_token(from_key).stream + else: + from_id = None + to_id = RoomStreamToken.parse_stream_token(to_key).stream + + if from_key == to_key: + defer.returnValue(([], from_key)) + + has_changed = yield self._events_stream_cache.get_room_has_changed( + room_id, from_id + ) + + if not has_changed: + defer.returnValue(([], from_key)) + + def f(txn): + if from_id is not None: + sql = ( + "SELECT event_id, stream_ordering FROM events WHERE" + " room_id = ?" + " AND not outlier" + " AND stream_ordering > ? AND stream_ordering <= ?" + " ORDER BY stream_ordering DESC LIMIT ?" + ) + txn.execute(sql, (room_id, from_id, to_id, limit)) + else: + sql = ( + "SELECT event_id, stream_ordering FROM events WHERE" + " room_id = ?" + " AND not outlier" + " AND stream_ordering <= ?" + " ORDER BY stream_ordering DESC LIMIT ?" + ) + txn.execute(sql, (room_id, to_id, limit)) + + rows = self.cursor_to_dict(txn) + + ret = self._get_events_txn( + txn, + [r["event_id"] for r in rows], + get_prev_content=True + ) + + ret.reverse() + + self._set_before_and_after(ret, rows) + + if rows: + key = "s%d" % min(r["stream_ordering"] for r in rows) + else: + # Assume we didn't get anything because there was nothing to + # get. + key = from_key + + return ret, key + res = yield self.runInteraction("get_recent_room_events_stream_for_room", f) + defer.returnValue(res) + + def get_room_changes_for_user(self, user_id, from_key, to_key): + if from_key is not None: + from_id = RoomStreamToken.parse_stream_token(from_key).stream + else: + from_id = None + to_id = RoomStreamToken.parse_stream_token(to_key).stream + + if from_key == to_key: + return defer.succeed([]) + + def f(txn): + if from_id is not None: + sql = ( + "SELECT m.event_id, stream_ordering FROM events AS e, room_memberships AS m" + " WHERE e.event_id = m.event_id" + " AND m.user_id = ?" + " AND e.stream_ordering > ? AND e.stream_ordering <= ?" + " ORDER BY e.stream_ordering ASC" + ) + txn.execute(sql, (user_id, from_id, to_id,)) + else: + sql = ( + "SELECT m.event_id, stream_ordering FROM events AS e, room_memberships AS m" + " WHERE e.event_id = m.event_id" + " AND m.user_id = ?" + " AND stream_ordering <= ?" + " ORDER BY stream_ordering ASC" + ) + txn.execute(sql, (user_id, to_id,)) + rows = self.cursor_to_dict(txn) + + ret = self._get_events_txn( + txn, + [r["event_id"] for r in rows], + get_prev_content=True + ) + + return ret + + return self.runInteraction("get_room_changes_for_user", f) + @log_function def get_room_events_stream( self, diff --git a/synapse/util/caches/room_change_cache.py b/synapse/util/caches/room_change_cache.py new file mode 100644 index 0000000000..3a873c9c30 --- /dev/null +++ b/synapse/util/caches/room_change_cache.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.util.caches import cache_counter, caches_by_name + + +from blist import sorteddict +import logging + + +logger = logging.getLogger(__name__) + + +class RoomStreamChangeCache(object): + """Keeps track of the stream_id of the latest change in rooms. + + Given a list of rooms and stream key, it will give a subset of rooms that + may have changed since that key. If the key is too old then the cache + will simply return all rooms. + """ + def __init__(self, name, current_key, size_of_cache=10000): + self._size_of_cache = size_of_cache + self._room_to_key = {} + self._cache = sorteddict() + self._earliest_known_key = current_key + self.name = name + caches_by_name[self.name] = self._cache + + def get_room_has_changed(self, room_id, key): + if key <= self._earliest_known_key: + return True + + room_key = self._room_to_key.get(room_id, None) + if room_key is None: + return True + + if key < room_key: + return True + + return False + + def get_rooms_changed(self, store, room_ids, key): + """Returns subset of room ids that have had new things since the + given key. If the key is too old it will just return the given list. + """ + if key > self._earliest_known_key: + keys = self._cache.keys() + i = keys.bisect_right(key) + + result = set( + self._cache[k] for k in keys[i:] + ).intersection(room_ids) + + cache_counter.inc_hits(self.name) + else: + result = room_ids + cache_counter.inc_misses(self.name) + + return result + + def room_has_changed(self, store, room_id, key): + """Informs the cache that the room has been changed at the given key. + """ + if key > self._earliest_known_key: + old_key = self._room_to_key.get(room_id, None) + if old_key: + key = max(key, old_key) + self._cache.pop(old_key, None) + self._cache[key] = room_id + + while len(self._cache) > self._size_of_cache: + k, r = self._cache.popitem() + self._earliest_known_key = max(k, self._earliest_known_key) + self._room_to_key.pop(r, None) -- cgit 1.4.1 From aca3193efb8c5f9f20049f61c96e5ff12f328b05 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 17:06:52 +0000 Subject: Use the same path for incremental with gap or without gap --- synapse/handlers/sync.py | 352 +++++++++++++++++++--------------------------- synapse/storage/events.py | 1 - synapse/storage/stream.py | 6 +- 3 files changed, 147 insertions(+), 212 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 1fdf978313..f5e20d6a6e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -72,7 +72,7 @@ class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [ ) -class ArchivedSyncResult(collections.namedtuple("JoinedSyncResult", [ +class ArchivedSyncResult(collections.namedtuple("ArchivedSyncResult", [ "room_id", # str "timeline", # TimelineBatch "state", # dict[(str, str), FrozenEvent] @@ -429,44 +429,20 @@ class SyncHandler(BaseHandler): defer.returnValue((now_token, ephemeral_by_room)) - @defer.inlineCallbacks def full_state_sync_for_archived_room(self, room_id, sync_config, leave_event_id, leave_token, timeline_since_token, tags_by_room, account_data_by_room): """Sync a room for a client which is starting without any state Returns: - A Deferred JoinedSyncResult. + A Deferred ArchivedSyncResult.
""" - batch = yield self.load_filtered_recents( - room_id, sync_config, leave_token, since_token=timeline_since_token + return self.incremental_sync_for_archived_room( + sync_config, room_id, leave_event_id, timeline_since_token, tags_by_room, + account_data_by_room, full_state=True, leave_token=leave_token, ) - leave_state = yield self.store.get_state_for_event(leave_event_id) - - leave_state = { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state( - leave_state.values() - ) - } - - account_data = self.account_data_for_room( - room_id, tags_by_room, account_data_by_room - ) - - account_data = sync_config.filter_collection.filter_room_account_data( - account_data - ) - - defer.returnValue(ArchivedSyncResult( - room_id=room_id, - timeline=batch, - state=leave_state, - account_data=account_data, - )) - @defer.inlineCallbacks def incremental_sync_with_gap(self, sync_config, since_token): """ Get the incremental delta needed to bring the client up to @@ -512,173 +488,127 @@ class SyncHandler(BaseHandler): sync_config.user ) + user_id = sync_config.user.to_string() + timeline_limit = sync_config.filter_collection.timeline_limit() tags_by_room = yield self.store.get_updated_tags( - sync_config.user.to_string(), + user_id, since_token.account_data_key, ) account_data, account_data_by_room = ( yield self.store.get_updated_account_data_for_user( - sync_config.user.to_string(), + user_id, since_token.account_data_key, ) ) + # Get a list of membership change events that have happened. rooms_changed = yield self.store.get_room_changes_for_user( - sync_config.user.to_string(), since_token.room_key, now_token.room_key + user_id, since_token.room_key, now_token.room_key ) + mem_change_events_by_room_id = {} + for event in rooms_changed: + mem_change_events_by_room_id.setdefault(event.room_id, []).append(event) + + newly_joined_rooms = [] + archived = [] + invited = [] + for room_id, events in mem_change_events_by_room_id.items(): + non_joins = [e for e in events if e.membership != Membership.JOIN] + has_join = len(non_joins) != len(events) + + # We want to figure out if we joined the room at some point since + # the last sync (even if we have since left). This is to make sure + # we do send down the room, and with full state, where necessary + if room_id in joined_room_ids or has_join: + old_state = yield self.get_state_at(room_id, since_token) + old_mem_ev = old_state.get((EventTypes.Member, user_id), None) + if not old_mem_ev or old_mem_ev.membership != Membership.JOIN: + newly_joined_rooms.append(room_id) + + if room_id in joined_room_ids: + continue + + if not non_joins: + continue + + # Only bother if we're still currently invited + should_invite = non_joins[-1].membership == Membership.INVITE + if should_invite: + room_sync = InvitedSyncResult(room_id, invite=non_joins[-1]) + if room_sync: + invited.append(room_sync) + + # Always include leave/ban events. Just take the last one. + # TODO: How do we handle ban -> leave in same batch? + leave_events = [ + e for e in non_joins + if e.membership in (Membership.LEAVE, Membership.BAN) + ] + + if leave_events: + leave_event = leave_events[-1] + room_sync = yield self.incremental_sync_for_archived_room( + sync_config, room_id, leave_event.event_id, since_token, + tags_by_room, account_data_by_room, + full_state=room_id in newly_joined_rooms + ) + if room_sync: + archived.append(room_sync) + + # Get all events for rooms we're currently joined to. 
room_to_events = yield self.store.get_room_events_stream_for_rooms( - room_ids=room_ids, + room_ids=joined_room_ids, from_key=since_token.room_key, to_key=now_token.room_key, limit=timeline_limit + 1, ) - room_events = [ - event - for events, _ in room_to_events.values() - for event in events - ] - - room_events.extend(rooms_changed) - - # room_events, _ = yield self.store.get_room_events_stream( - # sync_config.user.to_string(), - # from_key=since_token.room_key, - # to_key=now_token.room_key, - # limit=timeline_limit + 1, - # ) - joined = [] - archived = [] - if len(room_events) <= timeline_limit: - # There is no gap in any of the rooms. Therefore we can just - # partition the new events by room and return them. - logger.debug("Got %i events for incremental sync - not limited", - len(room_events)) - - invite_events = [] - leave_events = [] - events_by_room_id = {} - for event in room_events: - events_by_room_id.setdefault(event.room_id, []).append(event) - if event.room_id not in joined_room_ids: - if (event.type == EventTypes.Member - and event.state_key == sync_config.user.to_string()): - if event.membership == Membership.INVITE: - invite_events.append(event) - elif event.membership in (Membership.LEAVE, Membership.BAN): - leave_events.append(event) - - for room_id in joined_room_ids: - recents = events_by_room_id.get(room_id, []) - logger.debug("Events for room %s: %r", room_id, recents) - state = { - (event.type, event.state_key): event - for event in recents if event.is_state()} - limited = False + # We loop through all room ids, even if there are no new events, in case + # there are non-room events that we need to notify about. + for room_id in joined_room_ids: + room_entry = room_to_events.get(room_id, None) - if recents: - prev_batch = now_token.copy_and_replace( - "room_key", recents[0].internal_metadata.before - ) - else: - prev_batch = now_token - - just_joined = yield self.check_joined_room(sync_config, state) - if just_joined: - logger.debug("User has just joined %s: needs full state", - room_id) - state = yield self.get_state_at(room_id, now_token) - # the timeline is inherently limited if we've just joined - limited = True - - recents = sync_config.filter_collection.filter_room_timeline(recents) - - state = { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state( - state.values() - ) - } - - acc_data = self.account_data_for_room( - room_id, tags_by_room, account_data_by_room - ) + if room_entry: + events, start_key = room_entry - acc_data = sync_config.filter_collection.filter_room_account_data( - acc_data - ) + prev_batch_token = now_token.copy_and_replace("room_key", start_key) - ephemeral = sync_config.filter_collection.filter_room_ephemeral( - ephemeral_by_room.get(room_id, []) - ) + newly_joined_room = room_id in newly_joined_rooms + full_state = newly_joined_room - room_sync = JoinedSyncResult( - room_id=room_id, - timeline=TimelineBatch( - events=recents, - prev_batch=prev_batch, - limited=limited, - ), - state=state, - ephemeral=ephemeral, - account_data=acc_data, - unread_notifications={}, + batch = yield self.load_filtered_recents( + room_id, sync_config, prev_batch_token, + since_token=since_token, + recents=events, + newly_joined_room=newly_joined_room, ) - logger.debug("Result for room %s: %r", room_id, room_sync) - - if room_sync: - notifs = yield self.unread_notifs_for_room_id( - room_id, sync_config, all_ephemeral_by_room - ) - - if notifs is not None: - notif_dict = room_sync.unread_notifications -
notif_dict["notification_count"] = len(notifs) - notif_dict["highlight_count"] = len([ - 1 for notif in notifs - if _action_has_highlight(notif["actions"]) - ]) - - joined.append(room_sync) - - else: - logger.debug("Got %i events for incremental sync - hit limit", - len(room_events)) - - invite_events = yield self.store.get_invites_for_user( - sync_config.user.to_string() - ) - - leave_events = yield self.store.get_leave_and_ban_events_for_user( - sync_config.user.to_string() - ) - - for room_id in joined_room_ids: - room_sync = yield self.incremental_sync_with_gap_for_room( - room_id, sync_config, since_token, now_token, - ephemeral_by_room, tags_by_room, account_data_by_room, - all_ephemeral_by_room=all_ephemeral_by_room, + else: + batch = TimelineBatch( + events=[], + prev_batch=since_token, + limited=False, ) - if room_sync: - joined.append(room_sync) + full_state = False - for leave_event in leave_events: - room_sync = yield self.incremental_sync_for_archived_room( - sync_config, leave_event, since_token, tags_by_room, - account_data_by_room + room_sync = yield self.incremental_sync_with_gap_for_room( + room_id=room_id, + sync_config=sync_config, + since_token=since_token, + now_token=now_token, + ephemeral_by_room=ephemeral_by_room, + tags_by_room=tags_by_room, + account_data_by_room=account_data_by_room, + all_ephemeral_by_room=all_ephemeral_by_room, + batch=batch, + full_state=full_state, ) if room_sync: - archived.append(room_sync) - - invited = [ - InvitedSyncResult(room_id=event.room_id, invite=event) - for event in invite_events - ] + joined.append(room_sync) account_data_for_user = sync_config.filter_collection.filter_account_data( self.account_data_for_user(account_data) @@ -699,12 +629,10 @@ class SyncHandler(BaseHandler): @defer.inlineCallbacks def load_filtered_recents(self, room_id, sync_config, now_token, - since_token=None): + since_token=None, recents=None, newly_joined_room=False): """ :returns a Deferred TimelineBatch """ - limited = True - recents = [] filtering_factor = 2 timeline_limit = sync_config.filter_collection.timeline_limit() load_limit = max(timeline_limit * filtering_factor, 100) @@ -712,11 +640,27 @@ class SyncHandler(BaseHandler): room_key = now_token.room_key end_key = room_key + limited = recents is None or newly_joined_room or timeline_limit < len(recents) + + if recents is not None: + recents = sync_config.filter_collection.filter_room_timeline(recents) + recents = yield self._filter_events_for_client( + sync_config.user.to_string(), + recents, + is_peeking=sync_config.is_guest, + ) + else: + recents = [] + + since_key = None + if since_token and not newly_joined_room: + since_key = since_token.room_key + while limited and len(recents) < timeline_limit and max_repeat: - events, end_key = yield self.store.get_recent_room_events_stream_for_room( + events, end_key = yield self.store.get_room_events_stream_for_room( room_id, limit=load_limit + 1, - from_key=since_token.room_key if since_token else None, + from_key=since_key, to_key=end_key, ) loaded_recents = sync_config.filter_collection.filter_room_timeline(events) @@ -727,6 +671,7 @@ class SyncHandler(BaseHandler): ) loaded_recents.extend(recents) recents = loaded_recents + if len(events) <= load_limit: limited = False break @@ -742,7 +687,9 @@ class SyncHandler(BaseHandler): ) defer.returnValue(TimelineBatch( - events=recents, prev_batch=prev_batch_token, limited=limited + events=recents, + prev_batch=prev_batch_token, + limited=limited or newly_joined_room )) @defer.inlineCallbacks @@ -750,24 
+697,8 @@ class SyncHandler(BaseHandler): since_token, now_token, ephemeral_by_room, tags_by_room, account_data_by_room, - all_ephemeral_by_room): - """ Get the incremental delta needed to bring the client up to date for - the room. Gives the client the most recent events and the changes to - state. - Returns: - A Deferred JoinedSyncResult - """ - logger.debug("Doing incremental sync for room %s between %s and %s", - room_id, since_token, now_token) - - # TODO(mjark): Check for redactions we might have missed. - - batch = yield self.load_filtered_recents( - room_id, sync_config, now_token, since_token, - ) - - logger.debug("Recents %r", batch) - + all_ephemeral_by_room, + batch, full_state=False): if batch.limited: current_state = yield self.get_state_at(room_id, now_token) @@ -832,43 +763,48 @@ class SyncHandler(BaseHandler): defer.returnValue(room_sync) @defer.inlineCallbacks - def incremental_sync_for_archived_room(self, sync_config, leave_event, + def incremental_sync_for_archived_room(self, sync_config, room_id, leave_event_id, since_token, tags_by_room, - account_data_by_room): + account_data_by_room, full_state, + leave_token=None): """ Get the incremental delta needed to bring the client up to date for the archived room. Returns: A Deferred ArchivedSyncResult """ - stream_token = yield self.store.get_stream_token_for_event( - leave_event.event_id - ) + if not leave_token: + stream_token = yield self.store.get_stream_token_for_event( + leave_event_id + ) - leave_token = since_token.copy_and_replace("room_key", stream_token) + leave_token = since_token.copy_and_replace("room_key", stream_token) - if since_token.is_after(leave_token): + if since_token and since_token.is_after(leave_token): defer.returnValue(None) batch = yield self.load_filtered_recents( - leave_event.room_id, sync_config, leave_token, since_token, + room_id, sync_config, leave_token, since_token, ) logger.debug("Recents %r", batch) state_events_at_leave = yield self.store.get_state_for_event( - leave_event.event_id + leave_event_id ) - state_at_previous_sync = yield self.get_state_at( - leave_event.room_id, stream_position=since_token - ) + if not full_state: + state_at_previous_sync = yield self.get_state_at( + room_id, stream_position=since_token + ) - state_events_delta = yield self.compute_state_delta( - since_token=since_token, - previous_state=state_at_previous_sync, - current_state=state_events_at_leave, - ) + state_events_delta = yield self.compute_state_delta( + since_token=since_token, + previous_state=state_at_previous_sync, + current_state=state_events_at_leave, + ) + else: + state_events_delta = state_events_at_leave state_events_delta = { (e.type, e.state_key): e @@ -878,7 +814,7 @@ class SyncHandler(BaseHandler): } account_data = self.account_data_for_room( - leave_event.room_id, tags_by_room, account_data_by_room + room_id, tags_by_room, account_data_by_room ) account_data = sync_config.filter_collection.filter_room_account_data( @@ -886,7 +822,7 @@ class SyncHandler(BaseHandler): ) room_sync = ArchivedSyncResult( - room_id=leave_event.room_id, + room_id=room_id, timeline=batch, state=state_events_delta, account_data=account_data, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index d96ea3a30e..0dd1daaa2e 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -128,7 +128,6 @@ class EventsStore(SQLBaseStore): is_new_state=is_new_state, current_state=current_state, ) - logger.info("Invalidating %r at %r", event.room_id, stream_ordering) 
self._events_stream_cache.room_has_changed(None, event.room_id, stream_ordering) except _RollbackButIsFineException: pass diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 3a32a0019a..563e289c4e 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -179,7 +179,7 @@ class StreamStore(SQLBaseStore): room_ids = list(room_ids) for rm_ids in (room_ids[i:i+20] for i in xrange(0, len(room_ids), 20)): res = yield defer.gatherResults([ - self.get_recent_room_events_stream_for_room( + self.get_room_events_stream_for_room( room_id, from_key, to_key, limit ).addCallback(lambda r, rm: (rm, r), room_id) for room_id in room_ids @@ -189,7 +189,7 @@ class StreamStore(SQLBaseStore): defer.returnValue(results) @defer.inlineCallbacks - def get_recent_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0): + def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0): if from_key is not None: from_id = RoomStreamToken.parse_stream_token(from_key).stream else: @@ -246,7 +246,7 @@ class StreamStore(SQLBaseStore): key = from_key return ret, key - res = yield self.runInteraction("get_recent_room_events_stream_for_room", f) + res = yield self.runInteraction("get_room_events_stream_for_room", f) defer.returnValue(res) def get_room_changes_for_user(self, user_id, from_key, to_key): -- cgit 1.4.1 From e7febf4fbb1f1beb11e7a03252f6844f84af7f30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 17:11:04 +0000 Subject: PEP8 --- synapse/storage/events.py | 4 +++- synapse/storage/receipts.py | 2 -- synapse/storage/stream.py | 9 ++++++--- 3 files changed, 9 insertions(+), 6 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 0dd1daaa2e..80187722ea 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -128,7 +128,9 @@ class EventsStore(SQLBaseStore): is_new_state=is_new_state, current_state=current_state, ) - self._events_stream_cache.room_has_changed(None, event.room_id, stream_ordering) + self._events_stream_cache.room_has_changed( + None, event.room_id, stream_ordering + ) except _RollbackButIsFineException: pass diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index b7a4e77748..7118368d97 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -15,12 +15,10 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList, cached -from synapse.util.caches import cache_counter, caches_by_name from synapse.util.caches.room_change_cache import RoomStreamChangeCache from twisted.internet import defer -from blist import sorteddict import logging import ujson as json diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 563e289c4e..0b22251790 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -262,7 +262,8 @@ class StreamStore(SQLBaseStore): def f(txn): if from_id is not None: sql = ( - "SELECT m.event_id, stream_ordering FROM events AS e, room_memberships AS m" + "SELECT m.event_id, stream_ordering FROM events AS e," + " room_memberships AS m" " WHERE e.event_id = m.event_id" " AND m.user_id = ?" " AND e.stream_ordering > ? AND e.stream_ordering <= ?" 
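Both stream.py hunks above lean on the room stream token format; a condensed sketch of the two forms (simplified from synapse.types.RoomStreamToken; the literal values are made up):

def parse_room_stream_token(string):
    # "s<stream>" marks a live-stream position only; "t<topo>-<stream>"
    # also carries the topological ordering that back-pagination needs.
    if string.startswith("s"):
        return (None, int(string[1:]))
    if string.startswith("t"):
        topo, stream = string[1:].split("-", 1)
        return (int(topo), int(stream))
    raise ValueError("Invalid token %r" % (string,))

assert parse_room_stream_token("s26") == (None, 26)
assert parse_room_stream_token("t4-26") == (4, 26)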
@@ -271,7 +272,8 @@ class StreamStore(SQLBaseStore): txn.execute(sql, (user_id, from_id, to_id,)) else: sql = ( - "SELECT m.event_id, stream_ordering FROM events AS e, room_memberships AS m" + "SELECT m.event_id, stream_ordering FROM events AS e," + " room_memberships AS m" " WHERE e.event_id = m.event_id" " AND m.user_id = ?" " AND stream_ordering <= ?" @@ -307,7 +309,8 @@ class StreamStore(SQLBaseStore): "SELECT c.room_id FROM history_visibility AS h" " INNER JOIN current_state_events AS c" " ON h.event_id = c.event_id" - " WHERE c.room_id IN (%s) AND h.history_visibility = 'world_readable'" % ( + " WHERE c.room_id IN (%s)" + " AND h.history_visibility = 'world_readable'" % ( ",".join(map(lambda _: "?", room_ids)) ) ) -- cgit 1.4.1 From c5e7c0e436d9073c07887676817bb5a45314aea5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 09:58:45 +0000 Subject: Up get_rooms_for_user cache size --- synapse/storage/roommember.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index edfecced05..1d3e004c90 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -241,7 +241,7 @@ class RoomMemberStore(SQLBaseStore): return rows - @cached() + @cached(max_entries=5000) def get_rooms_for_user(self, user_id): return self.get_rooms_for_user_where_membership_is( user_id, membership_list=[Membership.JOIN], -- cgit 1.4.1 From ba8931829b0b601eb14049c92e0f21a10772576d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 11:34:17 +0000 Subject: Return correct type of token --- synapse/storage/stream.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 0b22251790..28721e6994 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -236,7 +236,7 @@ class StreamStore(SQLBaseStore): ret.reverse() - self._set_before_and_after(ret, rows) + self._set_before_and_after(ret, rows, topo_order=False) if rows: key = "s%d" % min(r["stream_ordering"] for r in rows) @@ -581,10 +581,13 @@ class StreamStore(SQLBaseStore): return rows[0][0] if rows else 0 @staticmethod - def _set_before_and_after(events, rows): + def _set_before_and_after(events, rows, topo_order=True): for event, row in zip(events, rows): stream = row["stream_ordering"] - topo = event.depth + if topo_order: + topo = event.depth + else: + topo = None internal = event.internal_metadata internal.before = str(RoomStreamToken(topo, stream - 1)) internal.after = str(RoomStreamToken(topo, stream)) -- cgit 1.4.1 From 4e7948b47a3f197682de82fc0cda07ebb08a581d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 11:52:34 +0000 Subject: Allow paginating backwards from stream token --- synapse/handlers/message.py | 15 +++++++++------ synapse/storage/stream.py | 16 ++++++++++++++-- tests/rest/client/v1/test_rooms.py | 9 +-------- 3 files changed, 24 insertions(+), 16 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index b73ad62147..82c8cb5f0c 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -16,7 +16,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import SynapseError, AuthError, Codes +from synapse.api.errors import AuthError, Codes from synapse.streams.config import PaginationConfig from 
synapse.events.utils import serialize_event from synapse.events.validator import EventValidator @@ -119,9 +119,12 @@ class MessageHandler(BaseHandler): if source_config.direction == 'b': # if we're going backwards, we might need to backfill. This # requires that we have a topo token. - if room_token.topological is None: - raise SynapseError(400, "Invalid token: cannot paginate " - "backwards from a stream token") + if room_token.topological: + max_topo = room_token.topological + else: + max_topo = yield self.store.get_max_topological_token_for_stream_and_room( + room_id, room_token.stream + ) if membership == Membership.LEAVE: # If they have left the room then clamp the token to be before @@ -131,11 +134,11 @@ class MessageHandler(BaseHandler): member_event_id ) leave_token = RoomStreamToken.parse(leave_token) - if leave_token.topological < room_token.topological: + if leave_token.topological < max_topo: source_config.from_key = str(leave_token) yield self.hs.get_handlers().federation_handler.maybe_backfill( - room_id, room_token.topological + room_id, max_topo ) events, next_key = yield data_source.get_pagination_rows( diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 28721e6994..5096b46864 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -234,10 +234,10 @@ class StreamStore(SQLBaseStore): get_prev_content=True ) - ret.reverse() - self._set_before_and_after(ret, rows, topo_order=False) + ret.reverse() + if rows: key = "s%d" % min(r["stream_ordering"] for r in rows) else: @@ -570,6 +570,18 @@ class StreamStore(SQLBaseStore): row["topological_ordering"], row["stream_ordering"],) ) + def get_max_topological_token_for_stream_and_room(self, room_id, stream_key): + sql = ( + "SELECT max(topological_ordering) FROM events" + " WHERE room_id = ? AND stream_ordering < ?" + ) + return self._execute( + "get_max_topological_token_for_stream_and_room", None, + sql, room_id, stream_key, + ).addCallback( + lambda r: r[0][0] if r else 0 + ) + def _get_max_topological_txn(self, txn): txn.execute( "SELECT MAX(topological_ordering) FROM events" diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 2fe6f695f5..ad5dd3bd6e 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1044,13 +1044,6 @@ class RoomMessageListTestCase(RestTestCase): self.assertTrue("chunk" in response) self.assertTrue("end" in response) - @defer.inlineCallbacks - def test_stream_token_is_rejected_for_back_pagination(self): - (code, response) = yield self.mock_resource.trigger_get( - "/rooms/%s/messages?access_token=x&from=s0_0_0_0_0&dir=b" % - self.room_id) - self.assertEquals(400, code) - @defer.inlineCallbacks def test_stream_token_is_accepted_for_fwd_pagianation(self): token = "s0_0_0_0_0" @@ -1061,4 +1054,4 @@ class RoomMessageListTestCase(RestTestCase): self.assertTrue("start" in response) self.assertEquals(token, response['start']) self.assertTrue("chunk" in response) - self.assertTrue("end" in response) \ No newline at end of file + self.assertTrue("end" in response) -- cgit 1.4.1 From 7ed2bbeb11d99ef97672497879e480f91db9b99b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 14:32:05 +0000 Subject: Clean up a bit. 
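For illustration, a sketch of what the back-pagination change above does when handed a stream-only token (sqlite3 stands in for the real engine; the table is trimmed to the two ordering columns):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE events (room_id TEXT,"
             " topological_ordering BIGINT, stream_ordering BIGINT)")
conn.executemany("INSERT INTO events VALUES (?, ?, ?)",
                 [("!r:hs", 1, 10), ("!r:hs", 2, 11), ("!r:hs", 3, 12)])

def max_topo_for_stream(room_id, stream_key):
    # Mirrors get_max_topological_token_for_stream_and_room above.
    row = conn.execute(
        "SELECT max(topological_ordering) FROM events"
        " WHERE room_id = ? AND stream_ordering < ?",
        (room_id, stream_key),
    ).fetchone()
    return row[0] if row and row[0] is not None else 0

assert max_topo_for_stream("!r:hs", 12) == 2  # events at stream 10 and 11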
Add comment --- synapse/app/homeserver.py | 13 +++++++------ synapse/server.py | 4 ++-- synapse/storage/__init__.py | 27 +++++++++------------------ 3 files changed, 18 insertions(+), 26 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 504557b2fc..65562222cf 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -255,12 +255,13 @@ class SynapseHomeServer(HomeServer): quit_with_error(e.message) def get_db_conn(self): - db_conn = self.database_engine.module.connect( - **{ - k: v for k, v in self.db_config.get("args", {}).items() - if not k.startswith("cp_") - } - ) + # Any param beginning with cp_ is a parameter for adbapi, and should + # not be passed to the database engine. + db_params = { + k: v for k, v in self.db_config.get("args", {}).items() + if not k.startswith("cp_") + } + db_conn = self.database_engine.module.connect(**db_params) self.database_engine.on_new_connection(db_conn) return db_conn diff --git a/synapse/server.py b/synapse/server.py index e013a349c9..5fee7fe130 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -28,7 +28,7 @@ from synapse.notifier import Notifier from synapse.api.auth import Auth from synapse.handlers import Handlers from synapse.state import StateHandler -from synapse.storage import get_datastore +from synapse.storage import DataStore from synapse.util import Clock from synapse.util.distributor import Distributor from synapse.streams.events import EventSources @@ -117,7 +117,7 @@ class HomeServer(object): def setup(self): logger.info("Setting up.") - self.datastore = get_datastore(self) + self.datastore = DataStore(self.get_db_conn(), self) logger.info("Finished setting up.") def get_ip_from_request(self, request): diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index c8cab45f77..eb88842308 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -61,22 +61,6 @@ logger = logging.getLogger(__name__) LAST_SEEN_GRANULARITY = 120*1000 -def get_datastore(hs): - logger.info("getting called!") - - conn = hs.get_db_conn() - try: - cur = conn.cursor() - cur.execute("SELECT MIN(stream_ordering) FROM events",) - rows = cur.fetchall() - min_token = rows[0][0] if rows and rows[0] and rows[0][0] else -1 - min_token = min(min_token, -1) - - return DataStore(conn, hs, min_token) - finally: - conn.close() - - class DataStore(RoomMemberStore, RoomStore, RegistrationStore, StreamStore, ProfileStore, PresenceStore, TransactionStore, @@ -98,10 +82,17 @@ class DataStore(RoomMemberStore, RoomStore, EventPushActionsStore ): - def __init__(self, db_conn, hs, min_stream_token): + def __init__(self, db_conn, hs): self.hs = hs - self.min_stream_token = min_stream_token + cur = db_conn.cursor() + try: + cur.execute("SELECT MIN(stream_ordering) FROM events",) + rows = cur.fetchall() + self.min_stream_token = rows[0][0] if rows and rows[0] and rows[0][0] else -1 + self.min_stream_token = min(self.min_stream_token, -1) + finally: + cur.close() self.client_ip_last_seen = Cache( name="client_ip_last_seen", -- cgit 1.4.1 From e1941442d442fe62570551071edfd936304697e7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 15:02:37 +0000 Subject: Invalidate caches properly. 
Remove unused arg --- synapse/storage/events.py | 9 ++++++--- synapse/storage/receipts.py | 10 ++++++---- synapse/storage/stream.py | 2 +- synapse/util/caches/room_change_cache.py | 4 ++-- 4 files changed, 15 insertions(+), 10 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 80187722ea..2d2270b297 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -128,9 +128,6 @@ class EventsStore(SQLBaseStore): is_new_state=is_new_state, current_state=current_state, ) - self._events_stream_cache.room_has_changed( - None, event.room_id, stream_ordering - ) except _RollbackButIsFineException: pass @@ -213,6 +210,12 @@ class EventsStore(SQLBaseStore): for event, _ in events_and_contexts: txn.call_after(self._invalidate_get_event_cache, event.event_id) + if not backfilled: + txn.call_after( + self._events_stream_cache.room_has_changed, + event.room_id, event.internal_metadata.stream_ordering, + ) + depth_updates = {} for event, _ in events_and_contexts: if event.internal_metadata.is_outlier(): diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 7118368d97..5ffbfdec51 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -78,7 +78,7 @@ class ReceiptsStore(SQLBaseStore): if from_key: room_ids = yield self._receipts_stream_cache.get_rooms_changed( - self, room_ids, from_key + room_ids, from_key ) results = yield self._get_linearized_receipts_for_rooms( @@ -221,6 +221,11 @@ class ReceiptsStore(SQLBaseStore): # FIXME: This shouldn't invalidate the whole cache txn.call_after(self.get_linearized_receipts_for_room.invalidate_all) + txn.call_after( + self._receipts_stream_cache.room_has_changed, + room_id, stream_id + ) + # We don't want to clobber receipts for more recent events, so we # have to compare orderings of existing receipts sql = ( @@ -308,9 +313,6 @@ class ReceiptsStore(SQLBaseStore): stream_id_manager = yield self._receipts_id_gen.get_next(self) with stream_id_manager as stream_id: - yield self._receipts_stream_cache.room_has_changed( - self, room_id, stream_id - ) have_persisted = yield self.runInteraction( "insert_linearized_receipt", self.insert_linearized_receipt_txn, diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 5096b46864..67e7e6a76f 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -169,7 +169,7 @@ class StreamStore(SQLBaseStore): from_id = RoomStreamToken.parse_stream_token(from_key).stream room_ids = yield self._events_stream_cache.get_rooms_changed( - self, room_ids, from_id + room_ids, from_id ) if not room_ids: diff --git a/synapse/util/caches/room_change_cache.py b/synapse/util/caches/room_change_cache.py index 3a873c9c30..eb2ab5f1e4 100644 --- a/synapse/util/caches/room_change_cache.py +++ b/synapse/util/caches/room_change_cache.py @@ -51,7 +51,7 @@ class RoomStreamChangeCache(object): return False - def get_rooms_changed(self, store, room_ids, key): + def get_rooms_changed(self, room_ids, key): """Returns subset of room ids that have had new things since the given key. If the key is too old it will just return the given list. """ @@ -70,7 +70,7 @@ class RoomStreamChangeCache(object): return result - def room_has_changed(self, store, room_id, key): + def room_has_changed(self, room_id, key): """Informs the cache that the room has been changed at the given key. 
""" if key > self._earliest_known_key: -- cgit 1.4.1 From c23a8c783382a0789c757e16e104cf08654e6cf8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 15:55:26 +0000 Subject: Ensure keys to RoomStreamChangeCache are ints --- synapse/storage/stream.py | 11 ++++++----- synapse/util/caches/room_change_cache.py | 6 ++++++ 2 files changed, 12 insertions(+), 5 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 67e7e6a76f..6a724193e1 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -199,12 +199,13 @@ class StreamStore(SQLBaseStore): if from_key == to_key: defer.returnValue(([], from_key)) - has_changed = yield self._events_stream_cache.get_room_has_changed( - room_id, from_id - ) + if from_id: + has_changed = yield self._events_stream_cache.get_room_has_changed( + room_id, from_id + ) - if not has_changed: - defer.returnValue(([], from_key)) + if not has_changed: + defer.returnValue(([], from_key)) def f(txn): if from_id is not None: diff --git a/synapse/util/caches/room_change_cache.py b/synapse/util/caches/room_change_cache.py index eb2ab5f1e4..e8bfedd72f 100644 --- a/synapse/util/caches/room_change_cache.py +++ b/synapse/util/caches/room_change_cache.py @@ -39,6 +39,8 @@ class RoomStreamChangeCache(object): caches_by_name[self.name] = self._cache def get_room_has_changed(self, room_id, key): + assert type(key) is int + if key <= self._earliest_known_key: return True @@ -55,6 +57,8 @@ class RoomStreamChangeCache(object): """Returns subset of room ids that have had new things since the given key. If the key is too old it will just return the given list. """ + assert type(key) is int + if key > self._earliest_known_key: keys = self._cache.keys() i = keys.bisect_right(key) @@ -73,6 +77,8 @@ class RoomStreamChangeCache(object): def room_has_changed(self, room_id, key): """Informs the cache that the room has been changed at the given key. """ + assert type(key) is int + if key > self._earliest_known_key: old_key = self._room_to_key.get(room_id, None) if old_key: -- cgit 1.4.1 From 00cb3eb24b277bb37bd1b7d8449c08a37cb4b014 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 16:37:41 +0000 Subject: Cache tags and account data --- synapse/storage/account_data.py | 20 ++++++- synapse/storage/events.py | 2 +- synapse/storage/receipts.py | 8 +-- synapse/storage/stream.py | 8 +-- synapse/storage/tags.py | 14 +++++ synapse/util/caches/room_change_cache.py | 92 ----------------------------- synapse/util/caches/stream_change_cache.py | 95 ++++++++++++++++++++++++++++++ 7 files changed, 137 insertions(+), 102 deletions(-) delete mode 100644 synapse/util/caches/room_change_cache.py create mode 100644 synapse/util/caches/stream_change_cache.py (limited to 'synapse/storage') diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 9c6597e012..95294c3f6c 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -14,6 +14,7 @@ # limitations under the License. 
from ._base import SQLBaseStore +from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer import ujson as json @@ -23,6 +24,13 @@ logger = logging.getLogger(__name__) class AccountDataStore(SQLBaseStore): + def __init__(self, hs): + super(AccountDataStore, self).__init__(hs) + + self._account_data_stream_cache = StreamChangeCache( + "AccountDataChangeCache", self._account_data_id_gen.get_max_token(None), + max_size=1000, + ) def get_account_data_for_user(self, user_id): """Get all the client account_data for a user. @@ -83,7 +91,7 @@ class AccountDataStore(SQLBaseStore): "get_account_data_for_room", get_account_data_for_room_txn ) - def get_updated_account_data_for_user(self, user_id, stream_id): + def get_updated_account_data_for_user(self, user_id, stream_id, room_ids=None): """Get all the client account_data for a that's changed. Args: @@ -120,6 +128,12 @@ class AccountDataStore(SQLBaseStore): return (global_account_data, account_data_by_room) + changed = self._account_data_stream_cache.get_entity_has_changed( + user_id, int(stream_id) + ) + if not changed: + defer.returnValue(({}, {})) + return self.runInteraction( "get_updated_account_data_for_user", get_updated_account_data_for_user_txn ) @@ -186,6 +200,10 @@ class AccountDataStore(SQLBaseStore): "content": content_json, } ) + txn.call_after( + self._account_data_stream_cache.entity_has_changed, + user_id, next_id, + ) self._update_max_stream_id(txn, next_id) with (yield self._account_data_id_gen.get_next(self)) as next_id: diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 2d2270b297..5e85552029 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -212,7 +212,7 @@ class EventsStore(SQLBaseStore): if not backfilled: txn.call_after( - self._events_stream_cache.room_has_changed, + self._events_stream_cache.entity_has_changed, event.room_id, event.internal_metadata.stream_ordering, ) diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 5ffbfdec51..8068c73740 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -15,7 +15,7 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList, cached -from synapse.util.caches.room_change_cache import RoomStreamChangeCache +from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer @@ -30,7 +30,7 @@ class ReceiptsStore(SQLBaseStore): def __init__(self, hs): super(ReceiptsStore, self).__init__(hs) - self._receipts_stream_cache = RoomStreamChangeCache( + self._receipts_stream_cache = StreamChangeCache( "ReceiptsRoomChangeCache", self._receipts_id_gen.get_max_token(None) ) @@ -77,7 +77,7 @@ class ReceiptsStore(SQLBaseStore): room_ids = set(room_ids) if from_key: - room_ids = yield self._receipts_stream_cache.get_rooms_changed( + room_ids = yield self._receipts_stream_cache.get_entities_changed( room_ids, from_key ) @@ -222,7 +222,7 @@ class ReceiptsStore(SQLBaseStore): txn.call_after(self.get_linearized_receipts_for_room.invalidate_all) txn.call_after( - self._receipts_stream_cache.room_has_changed, + self._receipts_stream_cache.entity_has_changed, room_id, stream_id ) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 6a724193e1..c7d7893328 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -37,7 +37,7 @@ from twisted.internet import defer from ._base import SQLBaseStore from synapse.util.caches.descriptors import 
cachedInlineCallbacks -from synapse.util.caches.room_change_cache import RoomStreamChangeCache +from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken from synapse.util.logutils import log_function @@ -81,7 +81,7 @@ class StreamStore(SQLBaseStore): def __init__(self, hs): super(StreamStore, self).__init__(hs) - self._events_stream_cache = RoomStreamChangeCache( + self._events_stream_cache = StreamChangeCache( "EventsRoomStreamChangeCache", self._stream_id_gen.get_max_token(None) ) @@ -168,7 +168,7 @@ class StreamStore(SQLBaseStore): def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0): from_id = RoomStreamToken.parse_stream_token(from_key).stream - room_ids = yield self._events_stream_cache.get_rooms_changed( + room_ids = yield self._events_stream_cache.get_entities_changed( room_ids, from_id ) @@ -200,7 +200,7 @@ class StreamStore(SQLBaseStore): defer.returnValue(([], from_key)) if from_id: - has_changed = yield self._events_stream_cache.get_room_has_changed( + has_changed = yield self._events_stream_cache.get_entity_has_changed( room_id, from_id ) diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 4c39e07cbd..50af899192 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -15,6 +15,7 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cached +from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer import ujson as json @@ -24,6 +25,13 @@ logger = logging.getLogger(__name__) class TagsStore(SQLBaseStore): + def __init__(self, hs): + super(TagsStore, self).__init__(hs) + + self._tags_stream_cache = StreamChangeCache( + "TagsChangeCache", self._account_data_id_gen.get_max_token(None), + max_size=1000, + ) def get_max_account_data_stream_id(self): """Get the current max stream id for the private user data stream @@ -80,6 +88,10 @@ class TagsStore(SQLBaseStore): room_ids = [row[0] for row in txn.fetchall()] return room_ids + changed = self._tags_stream_cache.get_entity_has_changed(user_id, int(stream_id)) + if not changed: + defer.returnValue({}) + room_ids = yield self.runInteraction( "get_updated_tags", get_updated_tags_txn ) @@ -177,6 +189,8 @@ class TagsStore(SQLBaseStore): next_id(int): The the revision to advance to. """ + txn.call_after(self._tags_stream_cache.entity_has_changed, user_id, next_id) + update_max_id_sql = ( "UPDATE account_data_max_stream_id" " SET stream_id = ?" diff --git a/synapse/util/caches/room_change_cache.py b/synapse/util/caches/room_change_cache.py deleted file mode 100644 index e8bfedd72f..0000000000 --- a/synapse/util/caches/room_change_cache.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse.util.caches import cache_counter, caches_by_name - - -from blist import sorteddict -import logging - - -logger = logging.getLogger(__name__) - - -class RoomStreamChangeCache(object): - """Keeps track of the stream_id of the latest change in rooms. - - Given a list of rooms and stream key, it will give a subset of rooms that - may have changed since that key. If the key is too old then the cache - will simply return all rooms. - """ - def __init__(self, name, current_key, size_of_cache=10000): - self._size_of_cache = size_of_cache - self._room_to_key = {} - self._cache = sorteddict() - self._earliest_known_key = current_key - self.name = name - caches_by_name[self.name] = self._cache - - def get_room_has_changed(self, room_id, key): - assert type(key) is int - - if key <= self._earliest_known_key: - return True - - room_key = self._room_to_key.get(room_id, None) - if room_key is None: - return True - - if key < room_key: - return True - - return False - - def get_rooms_changed(self, room_ids, key): - """Returns subset of room ids that have had new things since the - given key. If the key is too old it will just return the given list. - """ - assert type(key) is int - - if key > self._earliest_known_key: - keys = self._cache.keys() - i = keys.bisect_right(key) - - result = set( - self._cache[k] for k in keys[i:] - ).intersection(room_ids) - - cache_counter.inc_hits(self.name) - else: - result = room_ids - cache_counter.inc_misses(self.name) - - return result - - def room_has_changed(self, room_id, key): - """Informs the cache that the room has been changed at the given key. - """ - assert type(key) is int - - if key > self._earliest_known_key: - old_key = self._room_to_key.get(room_id, None) - if old_key: - key = max(key, old_key) - self._cache.pop(old_key, None) - self._cache[key] = room_id - - while len(self._cache) > self._size_of_cache: - k, r = self._cache.popitem() - self._earliest_key = max(k, self._earliest_key) - self._room_to_key.pop(r, None) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py new file mode 100644 index 0000000000..33b37f7f29 --- /dev/null +++ b/synapse/util/caches/stream_change_cache.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.util.caches import cache_counter, caches_by_name + + +from blist import sorteddict +import logging + + +logger = logging.getLogger(__name__) + + +class StreamChangeCache(object): + """Keeps track of the stream positions of the latest change in a set of entities. + + Typically the entity will be a room or user id. + + Given a list of entities and a stream position, it will give a subset of + entities that may have changed since that position. If position key is too + old then the cache will simply return all given entities. 
+ """ + def __init__(self, name, current_stream_pos, max_size=10000): + self._max_size = max_size + self._entity_to_key = {} + self._cache = sorteddict() + self._earliest_known_stream_pos = current_stream_pos + self.name = name + caches_by_name[self.name] = self._cache + + def get_entity_has_changed(self, entity, stream_pos): + assert type(stream_pos) is int + + if stream_pos <= self._earliest_known_stream_pos: + return True + + latest_entity_change_pos = self._entity_to_key.get(entity, None) + if latest_entity_change_pos is None: + return True + + if stream_pos < latest_entity_change_pos: + return True + + return False + + def get_entities_changed(self, entities, stream_pos): + """Returns subset of entities that have had new things since the + given position. If the position is too old it will just return the given list. + """ + assert type(stream_pos) is int + + if stream_pos > self._earliest_known_stream_pos: + keys = self._cache.keys() + i = keys.bisect_right(stream_pos) + + result = set( + self._cache[k] for k in keys[i:] + ).intersection(entities) + + cache_counter.inc_hits(self.name) + else: + result = entities + cache_counter.inc_misses(self.name) + + return result + + def entity_has_changed(self, entitiy, stream_pos): + """Informs the cache that the entitiy has been changed at the given + position. + """ + assert type(stream_pos) is int + + if stream_pos > self._earliest_known_stream_pos: + old_pos = self._entity_to_key.get(entitiy, None) + if old_pos: + stream_pos = max(stream_pos, old_pos) + self._cache.pop(old_pos, None) + self._cache[stream_pos] = entitiy + + while len(self._cache) > self._max_size: + k, r = self._cache.popitem() + self._earliest_known_stream_pos = max(k, self._earliest_known_stream_pos) + self._entity_to_key.pop(r, None) -- cgit 1.4.1 From 45cf827c8fe7163a51f1d0d7c9e2531da9b58c8d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 16:39:18 +0000 Subject: Change name and doc has_entity_changed --- synapse/storage/account_data.py | 2 +- synapse/storage/stream.py | 2 +- synapse/storage/tags.py | 2 +- synapse/util/caches/stream_change_cache.py | 4 +++- 4 files changed, 6 insertions(+), 4 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 95294c3f6c..62e49e1c0e 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -128,7 +128,7 @@ class AccountDataStore(SQLBaseStore): return (global_account_data, account_data_by_room) - changed = self._account_data_stream_cache.get_entity_has_changed( + changed = self._account_data_stream_cache.has_entity_changed( user_id, int(stream_id) ) if not changed: diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index c7d7893328..6e81d46c60 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -200,7 +200,7 @@ class StreamStore(SQLBaseStore): defer.returnValue(([], from_key)) if from_id: - has_changed = yield self._events_stream_cache.get_entity_has_changed( + has_changed = yield self._events_stream_cache.has_entity_changed( room_id, from_id ) diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 50af899192..75ce04092d 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -88,7 +88,7 @@ class TagsStore(SQLBaseStore): room_ids = [row[0] for row in txn.fetchall()] return room_ids - changed = self._tags_stream_cache.get_entity_has_changed(user_id, int(stream_id)) + changed = self._tags_stream_cache.has_entity_changed(user_id, int(stream_id)) if not 
changed: defer.returnValue({}) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 33b37f7f29..3ca0e57780 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -40,7 +40,9 @@ class StreamChangeCache(object): self.name = name caches_by_name[self.name] = self._cache - def get_entity_has_changed(self, entity, stream_pos): + def has_entity_changed(self, entity, stream_pos): + """Returns True if the entity may have been updated since stream_pos + """ assert type(stream_pos) is int if stream_pos <= self._earliest_known_stream_pos: -- cgit 1.4.1 From fdca8ec4187e1b4ea93cbfe17ece6ca4cbadd519 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 16:41:59 +0000 Subject: Add events index --- synapse/storage/schema/delta/28/events_room_stream.sql | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 synapse/storage/schema/delta/28/events_room_stream.sql (limited to 'synapse/storage') diff --git a/synapse/storage/schema/delta/28/events_room_stream.sql b/synapse/storage/schema/delta/28/events_room_stream.sql new file mode 100644 index 0000000000..200c35e6e2 --- /dev/null +++ b/synapse/storage/schema/delta/28/events_room_stream.sql @@ -0,0 +1,16 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+*/ + +CREATE INDEX events_room_stream on events(room_id, stream_ordering); -- cgit 1.4.1 From 8fe8951a8d997a652d16758a7abed2b8afd117e2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 17:09:09 +0000 Subject: Cache filters --- synapse/storage/filtering.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index f8fc9bdddc..5248736816 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -16,12 +16,13 @@ from twisted.internet import defer from ._base import SQLBaseStore +from synapse.util.caches.descriptors import cachedInlineCallbacks import simplejson as json class FilteringStore(SQLBaseStore): - @defer.inlineCallbacks + @cachedInlineCallbacks(num_args=2) def get_user_filter(self, user_localpart, filter_id): def_json = yield self._simple_select_one_onecol( table="user_filters", -- cgit 1.4.1 From 03b2c2577cbf51c80a42319689e1cb4903b8c4af Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 17:29:24 +0000 Subject: Don't use defer.returnValue --- synapse/storage/account_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 62e49e1c0e..88404059e8 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -132,7 +132,7 @@ class AccountDataStore(SQLBaseStore): user_id, int(stream_id) ) if not changed: - defer.returnValue(({}, {})) + return ({}, {}) return self.runInteraction( "get_updated_account_data_for_user", get_updated_account_data_for_user_txn -- cgit 1.4.1 From 467c27a1f90b873d6838ad1351399551cfa9cc07 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 18:20:00 +0000 Subject: Amalgamate tags and account data stream caches --- synapse/storage/account_data.py | 3 ++- synapse/storage/tags.py | 18 +++++++----------- 2 files changed, 9 insertions(+), 12 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 88404059e8..822c8bbe00 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -28,7 +28,8 @@ class AccountDataStore(SQLBaseStore): super(AccountDataStore, self).__init__(hs) self._account_data_stream_cache = StreamChangeCache( - "AccountDataChangeCache", self._account_data_id_gen.get_max_token(None), + "AccountDataAndTagsChangeCache", + self._account_data_id_gen.get_max_token(None), max_size=1000, ) diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 75ce04092d..e1a9c0c261 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -15,7 +15,6 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cached -from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer import ujson as json @@ -25,14 +24,6 @@ logger = logging.getLogger(__name__) class TagsStore(SQLBaseStore): - def __init__(self, hs): - super(TagsStore, self).__init__(hs) - - self._tags_stream_cache = StreamChangeCache( - "TagsChangeCache", self._account_data_id_gen.get_max_token(None), - max_size=1000, - ) - def get_max_account_data_stream_id(self): """Get the current max stream id for the private user data stream @@ -88,7 +79,9 @@ class TagsStore(SQLBaseStore): room_ids = [row[0] for row in txn.fetchall()] return room_ids - changed = self._tags_stream_cache.has_entity_changed(user_id, int(stream_id)) + 
changed = self._account_data_stream_cache.has_entity_changed( + user_id, int(stream_id) + ) if not changed: defer.returnValue({}) @@ -189,7 +182,10 @@ class TagsStore(SQLBaseStore): next_id(int): The the revision to advance to. """ - txn.call_after(self._tags_stream_cache.entity_has_changed, user_id, next_id) + txn.call_after( + self._account_data_stream_cache.entity_has_changed, + user_id, next_id + ) update_max_id_sql = ( "UPDATE account_data_max_stream_id" -- cgit 1.4.1 From ebc5f00efed5c7f72601933f55032947077c50a0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 13:37:40 +0000 Subject: Bump AccountDataAndTagsChangeCache size --- synapse/storage/account_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 822c8bbe00..ed6587429b 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -30,7 +30,7 @@ class AccountDataStore(SQLBaseStore): self._account_data_stream_cache = StreamChangeCache( "AccountDataAndTagsChangeCache", self._account_data_id_gen.get_max_token(None), - max_size=1000, + max_size=10000, ) def get_account_data_for_user(self, user_id): -- cgit 1.4.1 From 18579534ea67f2d98c189e2ddeccc4bfecb491eb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 14:37:59 +0000 Subject: Prefill stream change caches --- synapse/storage/__init__.py | 49 +++++++++++++++++++++++++++++- synapse/storage/account_data.py | 9 ------ synapse/storage/stream.py | 8 ----- synapse/util/caches/stream_change_cache.py | 5 ++- 4 files changed, 52 insertions(+), 19 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index eb88842308..95ae97d507 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -45,9 +45,10 @@ from .search import SearchStore from .tags import TagsStore from .account_data import AccountDataStore - from util.id_generators import IdGenerator, StreamIdGenerator +from synapse.util.caches.stream_change_cache import StreamChangeCache + import logging @@ -117,8 +118,54 @@ class DataStore(RoomMemberStore, RoomStore, self._push_rule_id_gen = IdGenerator("push_rules", "id", self) self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self) + events_max = self._stream_id_gen.get_max_token(None) + event_cache_prefill = self._get_cache_dict( + db_conn, "events", + entity_column="room_id", + stream_column="stream_ordering", + max_value=events_max, + ) + self._events_stream_cache = StreamChangeCache( + "EventsRoomStreamChangeCache", events_max, + prefilled_cache=event_cache_prefill, + ) + + account_max = self._account_data_id_gen.get_max_token(None) + account_cache_prefill = self._get_cache_dict( + db_conn, "account_data", + entity_column="user_id", + stream_column="stream_id", + max_value=account_max, + ) + self._account_data_stream_cache = StreamChangeCache( + "AccountDataAndTagsChangeCache", account_max, + prefilled_cache=account_cache_prefill, + ) + super(DataStore, self).__init__(hs) + def _get_cache_dict(self, db_conn, table, entity_column, stream_column, max_value): + sql = ( + "SELECT %(entity)s, MAX(%(stream)s) FROM %(table)s" + " WHERE %(stream)s > max(? 
- 100000, 0)" + " GROUP BY %(entity)s" + " ORDER BY MAX(%(stream)s) DESC" + " LIMIT 10000" + ) % { + "table": table, + "entity": entity_column, + "stream": stream_column, + } + + txn = db_conn.cursor() + txn.execute(sql, (int(max_value),)) + rows = txn.fetchall() + + return { + row[0]: row[1] + for row in rows + } + @defer.inlineCallbacks def insert_client_ip(self, user, access_token, ip, user_agent): now = int(self._clock.time_msec()) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index ed6587429b..625d062eb1 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -14,7 +14,6 @@ # limitations under the License. from ._base import SQLBaseStore -from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer import ujson as json @@ -24,14 +23,6 @@ logger = logging.getLogger(__name__) class AccountDataStore(SQLBaseStore): - def __init__(self, hs): - super(AccountDataStore, self).__init__(hs) - - self._account_data_stream_cache = StreamChangeCache( - "AccountDataAndTagsChangeCache", - self._account_data_id_gen.get_max_token(None), - max_size=10000, - ) def get_account_data_for_user(self, user_id): """Get all the client account_data for a user. diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 6e81d46c60..e245d2f914 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -37,7 +37,6 @@ from twisted.internet import defer from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks -from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken from synapse.util.logutils import log_function @@ -78,13 +77,6 @@ def upper_bound(token): class StreamStore(SQLBaseStore): - def __init__(self, hs): - super(StreamStore, self).__init__(hs) - - self._events_stream_cache = StreamChangeCache( - "EventsRoomStreamChangeCache", self._stream_id_gen.get_max_token(None) - ) - @defer.inlineCallbacks def get_appservice_room_stream(self, service, from_key, to_key, limit=0): # NB this lives here instead of appservice.py so we can reuse the diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index c673b1bdfc..891cb619fa 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -32,7 +32,7 @@ class StreamChangeCache(object): entities that may have changed since that position. If position key is too old then the cache will simply return all given entities. 
""" - def __init__(self, name, current_stream_pos, max_size=10000): + def __init__(self, name, current_stream_pos, max_size=10000, prefilled_cache={}): self._max_size = max_size self._entity_to_key = {} self._cache = sorteddict() @@ -40,6 +40,9 @@ class StreamChangeCache(object): self.name = name caches_by_name[self.name] = self._cache + for entity, stream_pos in prefilled_cache.items(): + self.entity_has_changed(entity, stream_pos) + def has_entity_changed(self, entity, stream_pos): """Returns True if the entity may have been updated since stream_pos """ -- cgit 1.4.1 From f67d60496a8a9b2c95fcacb6d4c539a1d4b6a105 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 14:41:16 +0000 Subject: Convert param style --- synapse/storage/__init__.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 95ae97d507..2ed505cb1e 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -85,6 +85,7 @@ class DataStore(RoomMemberStore, RoomStore, def __init__(self, db_conn, hs): self.hs = hs + self.database_engine = hs.database_engine cur = db_conn.cursor() try: @@ -157,6 +158,8 @@ class DataStore(RoomMemberStore, RoomStore, "stream": stream_column, } + sql = self.database_engine.convert_param_style(sql) + txn = db_conn.cursor() txn.execute(sql, (int(max_value),)) rows = txn.fetchall() -- cgit 1.4.1 From 45488e0ffae5100c3a82568642736aff203e1602 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 14:42:01 +0000 Subject: Max is not a function --- synapse/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 2ed505cb1e..4d374a8b07 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -148,7 +148,7 @@ class DataStore(RoomMemberStore, RoomStore, def _get_cache_dict(self, db_conn, table, entity_column, stream_column, max_value): sql = ( "SELECT %(entity)s, MAX(%(stream)s) FROM %(table)s" - " WHERE %(stream)s > max(? - 100000, 0)" + " WHERE %(stream)s > ? 
- 100000" " GROUP BY %(entity)s" " ORDER BY MAX(%(stream)s) DESC" " LIMIT 10000" -- cgit 1.4.1 From 3d60686c0ceeb88c4f6269110e92dc0c7bf5a3b6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 14:49:11 +0000 Subject: Actually use cache --- synapse/storage/__init__.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 4d374a8b07..957fff3c23 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -120,26 +120,26 @@ class DataStore(RoomMemberStore, RoomStore, self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self) events_max = self._stream_id_gen.get_max_token(None) - event_cache_prefill = self._get_cache_dict( + event_cache_prefill, min_event_val = self._get_cache_dict( db_conn, "events", entity_column="room_id", stream_column="stream_ordering", max_value=events_max, ) self._events_stream_cache = StreamChangeCache( - "EventsRoomStreamChangeCache", events_max, + "EventsRoomStreamChangeCache", min_event_val, prefilled_cache=event_cache_prefill, ) account_max = self._account_data_id_gen.get_max_token(None) - account_cache_prefill = self._get_cache_dict( + account_cache_prefill, min_acc_val = self._get_cache_dict( db_conn, "account_data", entity_column="user_id", stream_column="stream_id", max_value=account_max, ) self._account_data_stream_cache = StreamChangeCache( - "AccountDataAndTagsChangeCache", account_max, + "AccountDataAndTagsChangeCache", min_acc_val, prefilled_cache=account_cache_prefill, ) @@ -151,7 +151,6 @@ class DataStore(RoomMemberStore, RoomStore, " WHERE %(stream)s > ? - 100000" " GROUP BY %(entity)s" " ORDER BY MAX(%(stream)s) DESC" - " LIMIT 10000" ) % { "table": table, "entity": entity_column, @@ -164,11 +163,18 @@ class DataStore(RoomMemberStore, RoomStore, txn.execute(sql, (int(max_value),)) rows = txn.fetchall() - return { - row[0]: row[1] + cache = { + row[0]: int(row[1]) for row in rows } + if cache: + min_val = min(cache.values()) + else: + min_val = max_value + + return cache, min_val + @defer.inlineCallbacks def insert_client_ip(self, user, access_token, ip, user_agent): now = int(self._clock.time_msec()) -- cgit 1.4.1 From b5dbced9389d072d4bd15002c7ddffba9e54340e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 14:53:59 +0000 Subject: Don't prefill account data --- synapse/storage/__init__.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 957fff3c23..a6cb588563 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -132,15 +132,8 @@ class DataStore(RoomMemberStore, RoomStore, ) account_max = self._account_data_id_gen.get_max_token(None) - account_cache_prefill, min_acc_val = self._get_cache_dict( - db_conn, "account_data", - entity_column="user_id", - stream_column="stream_id", - max_value=account_max, - ) self._account_data_stream_cache = StreamChangeCache( - "AccountDataAndTagsChangeCache", min_acc_val, - prefilled_cache=account_cache_prefill, + "AccountDataAndTagsChangeCache", account_max, ) super(DataStore, self).__init__(hs) -- cgit 1.4.1 From 8da95b6f1bb1a37597f0b89c4da88b064401b0b8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 15:39:17 +0000 Subject: Comment. 
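Folding in the fixups above and the order-by removal in this commit, the prefill helper ends up behaving like the standalone sketch below. Plain sqlite3 here, so the convert_param_style step drops out; table and column names are whatever the caller passes (e.g. events / room_id / stream_ordering):

    import sqlite3

    def get_cache_dict(conn, table, entity_column, stream_column, max_value):
        # One GROUP BY pass: for each entity active in roughly the last
        # 100000 stream positions, record the latest position it changed at.
        sql = (
            "SELECT %(entity)s, MAX(%(stream)s) FROM %(table)s"
            " WHERE %(stream)s > ? - 100000"
            " GROUP BY %(entity)s"
        ) % {"table": table, "entity": entity_column, "stream": stream_column}
        rows = conn.execute(sql, (int(max_value),)).fetchall()
        cache = {row[0]: int(row[1]) for row in rows}
        # The smallest prefetched position becomes the cache's floor; with
        # no rows at all, only max_value itself is trustworthy.
        min_val = min(cache.values()) if cache else max_value
        return cache, min_val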
Remove superfluous order by --- synapse/storage/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index a6cb588563..ee2153737d 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -139,11 +139,13 @@ class DataStore(RoomMemberStore, RoomStore, super(DataStore, self).__init__(hs) def _get_cache_dict(self, db_conn, table, entity_column, stream_column, max_value): + # Fetch a mapping of room_id -> max stream position for "recent" rooms. + # It doesn't really matter how many we get, the StreamChangeCache will + # do the right thing to ensure it respects the max size of cache. sql = ( "SELECT %(entity)s, MAX(%(stream)s) FROM %(table)s" " WHERE %(stream)s > ? - 100000" " GROUP BY %(entity)s" - " ORDER BY MAX(%(stream)s) DESC" ) % { "table": table, "entity": entity_column, -- cgit 1.4.1 From cc9c97e0dc0cf399d5d6013f12746063091b619e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 16:41:51 +0000 Subject: Invalidate _account_data_stream_cache correctly --- synapse/storage/account_data.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 625d062eb1..b8387fc500 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -157,6 +157,10 @@ class AccountDataStore(SQLBaseStore): "content": content_json, } ) + txn.call_after( + self._account_data_stream_cache.entity_has_changed, + user_id, next_id, + ) self._update_max_stream_id(txn, next_id) with (yield self._account_data_id_gen.get_next(self)) as next_id: -- cgit 1.4.1 From 25c311eaf603cef8cbf9e6501aad83d53c304ebb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 16:52:48 +0000 Subject: Cache get_room_changes_for_user --- synapse/storage/__init__.py | 4 ++++ synapse/storage/roommember.py | 4 ++++ synapse/storage/stream.py | 7 +++++++ 3 files changed, 15 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index ee2153737d..c91c7a3729 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -131,6 +131,10 @@ class DataStore(RoomMemberStore, RoomStore, prefilled_cache=event_cache_prefill, ) + self._membership_stream_cache = StreamChangeCache( + "MembershipStreamChangeCache", events_max, + ) + account_max = self._account_data_id_gen.get_max_token(None) self._account_data_stream_cache = StreamChangeCache( "AccountDataAndTagsChangeCache", account_max, diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 1d3e004c90..3065b0c1a5 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -58,6 +58,10 @@ class RoomMemberStore(SQLBaseStore): txn.call_after(self.get_rooms_for_user.invalidate, (event.state_key,)) txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,)) txn.call_after(self.get_users_in_room.invalidate, (event.room_id,)) + txn.call_after( + self._membership_stream_cache.entity_has_changed, + event.state_key, event.internal_metadata.stream_ordering + ) def get_room_member(self, user_id, room_id): """Retrieve the current state of a room member. 
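The stream-change caches being wired up here share one contract: writers record changes via entity_has_changed (deferred with txn.call_after so the cache only advances if the transaction commits), and readers must treat any position at or below the cache's floor as "may have changed". A toy re-implementation makes that concrete (pure Python, standing in for the blist-backed StreamChangeCache):

    class ToyStreamChangeCache(object):
        """Pure-Python stand-in for StreamChangeCache, for illustration."""

        def __init__(self, current_stream_pos):
            self._entity_to_pos = {}
            self._earliest_known_stream_pos = current_stream_pos

        def entity_has_changed(self, entity, stream_pos):
            # Write path, e.g. fired from txn.call_after above.
            prev = self._entity_to_pos.get(entity, 0)
            self._entity_to_pos[entity] = max(prev, stream_pos)

        def has_entity_changed(self, entity, stream_pos):
            # Read path: may return a false "changed", never a false
            # "unchanged".
            if stream_pos <= self._earliest_known_stream_pos:
                return True  # position predates the cache's knowledge
            pos = self._entity_to_pos.get(entity)
            return pos is None or stream_pos < pos

    cache = ToyStreamChangeCache(current_stream_pos=10)
    cache.entity_has_changed("@user:hs", 12)
    assert cache.has_entity_changed("@user:hs", 11)      # changed at 12
    assert not cache.has_entity_changed("@user:hs", 12)  # nothing newer
    assert cache.has_entity_changed("@user:hs", 5)       # too old to know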
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index e245d2f914..cc9e623608 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -252,6 +252,13 @@ class StreamStore(SQLBaseStore): if from_key == to_key: return defer.succeed([]) + if from_id: + has_changed = self._membership_stream_cache.has_entity_changed( + user_id, int(from_id) + ) + if not has_changed: + return defer.succeed([]) + def f(txn): if from_id is not None: sql = ( -- cgit 1.4.1 From ceb6b8680a8e419c3c132dcdb675517c5e9f69fd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Feb 2016 10:33:52 +0000 Subject: Only use room_ids if in get_room_events_stream if is_guest --- synapse/storage/stream.py | 5 ----- 1 file changed, 5 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index e245d2f914..a60e662f7d 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -316,11 +316,6 @@ class StreamStore(SQLBaseStore): " WHERE m.user_id = ? AND m.membership = 'join'" ) current_room_membership_args = [user_id] - if room_ids: - current_room_membership_sql += " AND m.room_id in (%s)" % ( - ",".join(map(lambda _: "?", room_ids)) - ) - current_room_membership_args = [user_id] + room_ids # We also want to get any membership events about that user, e.g. # invites or leave notifications. -- cgit 1.4.1 From 4bf448be254808c83aeb5ae28e601752664bc9e2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Feb 2016 16:26:51 +0000 Subject: Switch over /events to use per room caches --- synapse/handlers/room.py | 25 ++++++++++++++++++++----- synapse/storage/stream.py | 4 ++-- 2 files changed, 22 insertions(+), 7 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 58e2d25f97..aca795e1c4 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1008,15 +1008,30 @@ class RoomEventSource(object): limit=limit, ) else: - events, end_key = yield self.store.get_room_events_stream( - user_id=user.to_string(), + room_events = yield self.store.get_room_changes_for_user( + user.to_string(), from_key, to_key + ) + + room_to_events = yield self.store.get_room_events_stream_for_rooms( + room_ids=room_ids, from_key=from_key, to_key=to_key, - limit=limit, - room_ids=room_ids, - is_guest=is_guest, + limit=limit or 10, ) + events = list(room_events) + events.extend(e for evs, _ in room_to_events.values() for e in evs) + + events.sort(key=lambda e: e.internal_metadata.after) + + if limit: + events[:] = events[:limit] + + if events: + end_key = events[-1].internal_metadata.after + else: + end_key = to_key + defer.returnValue((events, end_key)) def get_current_key(self, direction='f'): diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 8dc8f5c640..fd84aa8996 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -39,7 +39,6 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken -from synapse.util.logutils import log_function import logging @@ -288,11 +287,12 @@ class StreamStore(SQLBaseStore): get_prev_content=True ) + self._set_before_and_after(ret, rows, topo_order=False) + return ret return self.runInteraction("get_room_changes_for_user", f) - @log_function def get_room_events_stream( self, user_id, -- cgit 1.4.1 From 89b40b225cda4326081f6735b2a8a9bff5ce3446 Mon Sep 17 00:00:00 2001 From: Erik Johnston 
Date: Mon, 1 Feb 2016 16:32:46 +0000 Subject: Order things correctly --- synapse/handlers/room.py | 2 +- synapse/storage/stream.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index aca795e1c4..a71cba8ef1 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1022,7 +1022,7 @@ class RoomEventSource(object): events = list(room_events) events.extend(e for evs, _ in room_to_events.values() for e in evs) - events.sort(key=lambda e: e.internal_metadata.after) + events.sort(key=lambda e: e.internal_metadata.order) if limit: events[:] = events[:limit] diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index fd84aa8996..a03458c2fc 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -598,6 +598,10 @@ class StreamStore(SQLBaseStore): internal = event.internal_metadata internal.before = str(RoomStreamToken(topo, stream - 1)) internal.after = str(RoomStreamToken(topo, stream)) + internal.order = ( + int(topo) if topo else 0, + int(stream), + ) @defer.inlineCallbacks def get_events_around(self, room_id, event_id, before_limit, after_limit): -- cgit 1.4.1 From 65e92eca4912848b03f71b7b7d29727015be31ce Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Feb 2016 15:19:34 +0000 Subject: Change the way we do public room list fetching --- synapse/handlers/room.py | 86 ++++++++++++++++------ synapse/storage/room.py | 2 +- .../storage/schema/delta/28/public_roms_index.sql | 16 ++++ 3 files changed, 80 insertions(+), 24 deletions(-) create mode 100644 synapse/storage/schema/delta/28/public_roms_index.sql (limited to 'synapse/storage') diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a71cba8ef1..1b3f624c67 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -876,31 +876,71 @@ class RoomListHandler(BaseHandler): @defer.inlineCallbacks def get_public_room_list(self): - chunk = yield self.store.get_rooms(is_public=True) - - room_members = yield defer.gatherResults( - [ - self.store.get_users_in_room(room["room_id"]) - for room in chunk - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) - - avatar_urls = yield defer.gatherResults( - [ - self.get_room_avatar_url(room["room_id"]) - for room in chunk - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) - - for i, room in enumerate(chunk): - room["num_joined_members"] = len(room_members[i]) - if avatar_urls[i]: - room["avatar_url"] = avatar_urls[i] + room_ids = yield self.store.get_public_room_ids() + + @defer.inlineCallbacks + def handle_room(room_id): + aliases = yield self.store.get_aliases_for_room(room_id) + if not aliases: + defer.returnValue(None) + + state = yield self.state_handler.get_current_state(room_id) + + result = {"aliases": aliases, "room_id": room_id} + + name_event = state.get((EventTypes.Name, ""), None) + if name_event: + name = name_event.content.get("name", None) + if name: + result["name"] = name + + topic_event = state.get((EventTypes.Topic, ""), None) + if topic_event: + topic = topic_event.content.get("topic", None) + if topic: + result["topic"] = topic + + canonical_event = state.get((EventTypes.CanonicalAlias, ""), None) + if canonical_event: + canonical_alias = canonical_event.content.get("alias", None) + if canonical_alias: + result["canonical_alias"] = canonical_alias + + visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None) + visibility = None + if visibility_event: + visibility = 
visibility_event.content.get("history_visibility", None) + result["world_readable"] = visibility == "world_readable" + + guest_event = state.get((EventTypes.GuestAccess, ""), None) + guest = None + if guest_event: + guest = guest_event.content.get("guest_access", None) + result["guest_can_join"] = guest == "can_join" + + avatar_event = state.get(("m.room.avatar", ""), None) + if avatar_event: + avatar_url = avatar_event.content.get("url", None) + if avatar_url: + result["avatar_url"] = avatar_url + + result["num_joined_members"] = sum( + 1 for (event_type, _), ev in state.items() + if event_type == EventTypes.Member and ev.membership == Membership.JOIN + ) + + defer.returnValue(result) + + result = [] + for chunk in (room_ids[i:i+10] for i in xrange(0, len(room_ids), 10)): + chunk_result = yield defer.gatherResults([ + handle_room(room_id) + for room_id in chunk + ], consumeErrors=True).addErrback(unwrapFirstError) + result.extend(v for v in chunk_result if v) # FIXME (erikj): START is no longer a valid value - defer.returnValue({"start": "START", "end": "END", "chunk": chunk}) + defer.returnValue({"start": "START", "end": "END", "chunk": result}) @defer.inlineCallbacks def get_room_avatar_url(self, room_id): diff --git a/synapse/storage/room.py b/synapse/storage/room.py index dc09a3aaba..1b6311f332 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.errors import StoreError from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cachedInlineCallbacks +from synapse.util.caches.descriptors import cachedInlineCallbacks, cached from .engines import PostgresEngine, Sqlite3Engine import collections diff --git a/synapse/storage/schema/delta/28/public_roms_index.sql b/synapse/storage/schema/delta/28/public_roms_index.sql new file mode 100644 index 0000000000..ba62a974a4 --- /dev/null +++ b/synapse/storage/schema/delta/28/public_roms_index.sql @@ -0,0 +1,16 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +*/ + +CREATE INDEX public_room_index on rooms(is_public); -- cgit 1.4.1 From 477b1ed6cfd130e5a004cda0c0b84509da2aa006 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Feb 2016 15:58:14 +0000 Subject: Fetch events in a separate transaction. This has a couple of benefits: - It reduces the time of transactions, allowing other database requests to run. - Fetching events is given a dedicated database thread, and so can't starve other database requests. 
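Reduced to a standalone sketch, the new shape keeps only an index scan inside the transaction and loads event bodies in a second step (plain sqlite3 in place of runInteraction/_get_events; event_json is assumed to be the table holding serialised event bodies, everything else is simplified):

    import sqlite3

    def recent_event_rows(conn, room_id, from_pos, to_pos, limit):
        # Phase 1: a short transaction touching only the
        # (room_id, stream_ordering) index, so its DB thread frees quickly.
        with conn:
            cur = conn.execute(
                "SELECT event_id, stream_ordering FROM events"
                " WHERE room_id = ? AND stream_ordering > ?"
                " AND stream_ordering <= ?"
                " ORDER BY stream_ordering DESC LIMIT ?",
                (room_id, from_pos, to_pos, limit),
            )
            return cur.fetchall()

    def load_event_bodies(conn, event_ids):
        # Phase 2: the heavyweight fetch runs separately, so it neither
        # holds phase 1 open nor starves unrelated database requests.
        if not event_ids:
            return {}
        marks = ",".join("?" * len(event_ids))
        cur = conn.execute(
            "SELECT event_id, json FROM event_json"
            " WHERE event_id IN (%s)" % marks,
            list(event_ids),
        )
        return dict(cur.fetchall())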
--- synapse/storage/stream.py | 55 +++++++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 26 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index a03458c2fc..bcae3d718e 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -220,27 +220,29 @@ class StreamStore(SQLBaseStore): rows = self.cursor_to_dict(txn) - ret = self._get_events_txn( - txn, - [r["event_id"] for r in rows], - get_prev_content=True - ) + return rows - self._set_before_and_after(ret, rows, topo_order=False) + rows = yield self.runInteraction("get_room_events_stream_for_room", f) - ret.reverse() + ret = yield self._get_events( + [r["event_id"] for r in rows], + get_prev_content=True + ) - if rows: - key = "s%d" % min(r["stream_ordering"] for r in rows) - else: - # Assume we didn't get anything because there was nothing to - # get. - key = from_key + self._set_before_and_after(ret, rows, topo_order=False) - return ret, key - res = yield self.runInteraction("get_room_events_stream_for_room", f) - defer.returnValue(res) + ret.reverse() + if rows: + key = "s%d" % min(r["stream_ordering"] for r in rows) + else: + # Assume we didn't get anything because there was nothing to + # get. + key = from_key + + defer.returnValue((ret, key)) + + @defer.inlineCallbacks def get_room_changes_for_user(self, user_id, from_key, to_key): if from_key is not None: from_id = RoomStreamToken.parse_stream_token(from_key).stream @@ -249,14 +251,14 @@ class StreamStore(SQLBaseStore): to_id = RoomStreamToken.parse_stream_token(to_key).stream if from_key == to_key: - return defer.succeed([]) + defer.returnValue([]) if from_id: has_changed = self._membership_stream_cache.has_entity_changed( user_id, int(from_id) ) if not has_changed: - return defer.succeed([]) + defer.returnValue([]) def f(txn): if from_id is not None: @@ -281,17 +283,18 @@ class StreamStore(SQLBaseStore): txn.execute(sql, (user_id, to_id,)) rows = self.cursor_to_dict(txn) - ret = self._get_events_txn( - txn, - [r["event_id"] for r in rows], - get_prev_content=True - ) + return rows + + rows = yield self.runInteraction("get_room_changes_for_user", f) - self._set_before_and_after(ret, rows, topo_order=False) + ret = yield self._get_events( + [r["event_id"] for r in rows], + get_prev_content=True + ) - return ret + self._set_before_and_after(ret, rows, topo_order=False) - return self.runInteraction("get_room_changes_for_user", f) + defer.returnValue(ret) def get_room_events_stream( self, -- cgit 1.4.1 From 8a391e33ae21f9a62c57cca8eea47435a14a6247 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Feb 2016 16:12:10 +0000 Subject: s/get_room_changes_for_user/get_membership_changes_for_user/ --- synapse/handlers/room.py | 2 +- synapse/handlers/sync.py | 2 +- synapse/storage/stream.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 68e2c75a48..799221c198 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1013,7 +1013,7 @@ class RoomEventSource(object): limit=limit, ) else: - room_events = yield self.store.get_room_changes_for_user( + room_events = yield self.store.get_membership_changes_for_user( user.to_string(), from_key, to_key ) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 8d8d10da33..dc686db541 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -479,7 +479,7 @@ class SyncHandler(BaseHandler): ) # 
Get a list of membership change events that have happened. - rooms_changed = yield self.store.get_room_changes_for_user( + rooms_changed = yield self.store.get_membership_changes_for_user( user_id, since_token.room_key, now_token.room_key ) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index bcae3d718e..338a9d40d5 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -243,7 +243,7 @@ class StreamStore(SQLBaseStore): defer.returnValue((ret, key)) @defer.inlineCallbacks - def get_room_changes_for_user(self, user_id, from_key, to_key): + def get_membership_changes_for_user(self, user_id, from_key, to_key): if from_key is not None: from_id = RoomStreamToken.parse_stream_token(from_key).stream else: @@ -285,7 +285,7 @@ class StreamStore(SQLBaseStore): return rows - rows = yield self.runInteraction("get_room_changes_for_user", f) + rows = yield self.runInteraction("get_membership_changes_for_user", f) ret = yield self._get_events( [r["event_id"] for r in rows], -- cgit 1.4.1 From d83d004ccdb7ace1dcb51b8acf7645bc176b10a5 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 2 Feb 2016 17:18:50 +0000 Subject: Fix flake8 warnings for new flake8 --- setup.cfg | 1 + synapse/api/auth.py | 2 +- synapse/app/__init__.py | 19 ++++++++++++++++ synapse/app/homeserver.py | 38 +++++++++----------------------- synapse/appservice/api.py | 2 +- synapse/federation/federation_client.py | 2 +- synapse/handlers/_base.py | 2 +- synapse/handlers/directory.py | 4 ++-- synapse/handlers/events.py | 2 +- synapse/handlers/presence.py | 2 +- synapse/handlers/register.py | 2 +- synapse/handlers/room.py | 2 +- synapse/http/matrixfederationclient.py | 2 +- synapse/notifier.py | 2 +- synapse/push/push_rule_evaluator.py | 2 +- synapse/rest/client/v1/login.py | 2 +- synapse/rest/client/v1/pusher.py | 4 ++-- synapse/rest/client/v1/register.py | 3 ++- synapse/rest/client/v2_alpha/register.py | 3 ++- synapse/rest/client/versions.py | 4 +--- synapse/server.py | 2 +- synapse/state.py | 2 +- synapse/storage/__init__.py | 2 +- synapse/storage/_base.py | 7 ++++-- synapse/storage/engines/sqlite3.py | 2 +- synapse/storage/event_federation.py | 2 +- synapse/storage/events.py | 6 ++--- synapse/storage/stream.py | 2 +- synapse/util/__init__.py | 2 +- synapse/util/caches/descriptors.py | 4 ++-- synapse/util/caches/expiringcache.py | 2 +- synapse/util/caches/treecache.py | 2 +- synapse/util/logutils.py | 2 +- synapse/util/ratelimitutils.py | 2 +- 34 files changed, 73 insertions(+), 66 deletions(-) (limited to 'synapse/storage') diff --git a/setup.cfg b/setup.cfg index ba027c7d13..e7fc5ffe78 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,3 +16,4 @@ ignore = [flake8] max-line-length = 90 +ignore = W503 diff --git a/synapse/api/auth.py b/synapse/api/auth.py index b5536e8565..c5a2865e26 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -574,7 +574,7 @@ class Auth(object): raise AuthError( 403, "Application service has not registered this user" - ) + ) defer.returnValue(user_id) @defer.inlineCallbacks diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py index bfebb0f644..1bc4279807 100644 --- a/synapse/app/__init__.py +++ b/synapse/app/__init__.py @@ -12,3 +12,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +import sys +sys.dont_write_bytecode = True + +from synapse.python_dependencies import ( + check_requirements, MissingRequirementError +) # NOQA + +try: + check_requirements() +except MissingRequirementError as e: + message = "\n".join([ + "Missing Requirement: %s" % (e.message,), + "To install run:", + " pip install --upgrade --force \"%s\"" % (e.dependency,), + "", + ]) + sys.stderr.writelines(message) + sys.exit(1) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e5066c48ef..c3066d6a0d 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -14,27 +14,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +import synapse + +import contextlib +import logging +import os +import re +import resource +import subprocess import sys -from synapse.rest import ClientRestResource +import time -sys.dont_write_bytecode = True from synapse.python_dependencies import ( - check_requirements, DEPENDENCY_LINKS, MissingRequirementError + check_requirements, DEPENDENCY_LINKS ) -if __name__ == '__main__': - try: - check_requirements() - except MissingRequirementError as e: - message = "\n".join([ - "Missing Requirement: %s" % (e.message,), - "To install run:", - " pip install --upgrade --force \"%s\"" % (e.dependency,), - "", - ]) - sys.stderr.writelines(message) - sys.exit(1) - +from synapse.rest import ClientRestResource from synapse.storage.engines import create_engine, IncorrectDatabaseSetup from synapse.storage import are_all_users_on_domain from synapse.storage.prepare_database import UpgradeDatabaseException @@ -73,17 +68,6 @@ from synapse import events from daemonize import Daemonize -import synapse - -import contextlib -import logging -import os -import re -import resource -import subprocess -import time - - logger = logging.getLogger("synapse.app.homeserver") diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index e1c07028e8..bc90605324 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -29,7 +29,7 @@ class ApplicationServiceApi(SimpleHttpClient): pushing. """ - def __init__(self, hs): + def __init__(self, hs): super(ApplicationServiceApi, self).__init__(hs) self.clock = hs.get_clock() diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c6259f9dc8..e30e2da58d 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -57,7 +57,7 @@ class FederationClient(FederationBase): cache_name="get_pdu_cache", clock=self._clock, max_len=1000, - expiry_ms=120*1000, + expiry_ms=120 * 1000, reset_expiry_on_get=False, ) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 744a9ee507..1423df6cf3 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -147,7 +147,7 @@ class BaseHandler(object): ) if not allowed: raise LimitExceededError( - retry_after_ms=int(1000*(time_allowed - time_now)), + retry_after_ms=int(1000 * (time_allowed - time_now)), ) @defer.inlineCallbacks diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 691564c651..4efecb1ffd 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -175,8 +175,8 @@ class DirectoryHandler(BaseHandler): # If this server is in the list of servers, return it first. 
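# (Aside -- the change just below is purely a reflow for flake8's new W503
# rule: binary operators now lead the continuation line, and behaviour is
# unchanged. As a standalone, runnable sketch of that behaviour -- move the
# local server to the front so it is tried first, keeping the relative
# order of the rest; the names here are illustrative:)

def prefer_local(servers, local_name):
    # No-op if we are not in the list; otherwise hoist ourselves to the
    # front so lookups stay local when possible.
    if local_name in servers:
        return [local_name] + [s for s in servers if s != local_name]
    return list(servers)

# prefer_local(["b.org", "a.org"], "a.org") == ["a.org", "b.org"]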
if self.server_name in servers: servers = ( - [self.server_name] - + [s for s in servers if s != self.server_name] + [self.server_name] + + [s for s in servers if s != self.server_name] ) else: servers = list(servers) diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 254b483da6..5ad8f3779a 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -130,7 +130,7 @@ class EventStreamHandler(BaseHandler): # Add some randomness to this value to try and mitigate against # thundering herds on restart. - timeout = random.randint(int(timeout*0.9), int(timeout*1.1)) + timeout = random.randint(int(timeout * 0.9), int(timeout * 1.1)) events, tokens = yield self.notifier.get_events_for( auth_user, pagin_config, timeout, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index d36eb3b8d7..d0c21ff5c9 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -34,7 +34,7 @@ metrics = synapse.metrics.get_metrics_for(__name__) # Don't bother bumping "last active" time if it differs by less than 60 seconds -LAST_ACTIVE_GRANULARITY = 60*1000 +LAST_ACTIVE_GRANULARITY = 60 * 1000 # Keep no more than this number of offline serial revisions MAX_OFFLINE_SERIALS = 1000 diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index abd1a16a41..b8fbcf9233 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -213,7 +213,7 @@ class RegistrationHandler(BaseHandler): 400, "User ID must only contain characters which do not" " require URL encoding." - ) + ) user = UserID(localpart, self.hs.hostname) user_id = user.to_string() diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 799221c198..088b76d237 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -927,7 +927,7 @@ class RoomContextHandler(BaseHandler): Returns: dict, or None if the event isn't found """ - before_limit = math.floor(limit/2.) + before_limit = math.floor(limit / 2.) after_limit = limit - before_limit now_token = yield self.hs.get_event_sources().get_current_token() diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index da13e32e78..c3589534f8 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -152,7 +152,7 @@ class MatrixFederationHttpClient(object): return self.clock.time_bound_deferred( request_deferred, - time_out=timeout/1000. if timeout else 60, + time_out=timeout / 1000. 
if timeout else 60, ) response = yield preserve_context_over_fn( diff --git a/synapse/notifier.py b/synapse/notifier.py index 29965a9ab5..1a90bd55cd 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -308,7 +308,7 @@ class Notifier(object): def timed_out(): if listener: listener.deferred.cancel() - timer = self.clock.call_later(timeout/1000., timed_out) + timer = self.clock.call_later(timeout / 1000., timed_out) prev_token = from_token while not result: diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index dca018af95..2a2b4437dc 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -304,7 +304,7 @@ def _flatten_dict(d, prefix=[], result={}): if isinstance(value, basestring): result[".".join(prefix + [key])] = value.lower() elif hasattr(value, "items"): - _flatten_dict(value, prefix=(prefix+[key]), result=result) + _flatten_dict(value, prefix=(prefix + [key]), result=result) return result diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 07836709fb..7199113dac 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -89,7 +89,7 @@ class LoginRestServlet(ClientV1RestServlet): LoginRestServlet.SAML2_TYPE): relay_state = "" if "relay_state" in login_submission: - relay_state = "&RelayState="+urllib.quote( + relay_state = "&RelayState=" + urllib.quote( login_submission["relay_state"]) result = { "uri": "%s%s" % (self.idp_redirect_url, relay_state) diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index e218ed215c..5547f1b112 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -52,7 +52,7 @@ class PusherRestServlet(ClientV1RestServlet): if i not in content: missing.append(i) if len(missing): - raise SynapseError(400, "Missing parameters: "+','.join(missing), + raise SynapseError(400, "Missing parameters: " + ','.join(missing), errcode=Codes.MISSING_PARAM) logger.debug("set pushkey %s to kind %s", content['pushkey'], content['kind']) @@ -83,7 +83,7 @@ class PusherRestServlet(ClientV1RestServlet): data=content['data'] ) except PusherConfigException as pce: - raise SynapseError(400, "Config Error: "+pce.message, + raise SynapseError(400, "Config Error: " + pce.message, errcode=Codes.MISSING_PARAM) defer.returnValue((200, {})) diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index 5378a9a938..2bfd4d96bf 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -38,7 +38,8 @@ logger = logging.getLogger(__name__) if hasattr(hmac, "compare_digest"): compare_digest = hmac.compare_digest else: - compare_digest = lambda a, b: a == b + def compare_digest(a, b): + return a == b class RegisterRestServlet(ClientV1RestServlet): diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 5d50dd9e3d..56a5bbec30 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -34,7 +34,8 @@ from synapse.util.async import run_on_reactor if hasattr(hmac, "compare_digest"): compare_digest = hmac.compare_digest else: - compare_digest = lambda a, b: a == b + def compare_digest(a, b): + return a == b logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 349ef6b396..ca5468c402 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -26,9 +26,7 @@ class 
VersionsRestServlet(RestServlet): def on_GET(self, request): return (200, { - "versions": [ - "r0.0.1", - ] + "versions": ["r0.0.1"] }) diff --git a/synapse/server.py b/synapse/server.py index 5fee7fe130..368d615576 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -23,7 +23,7 @@ from twisted.web.client import BrowserLikePolicyForHTTPS from twisted.enterprise import adbapi from synapse.federation import initialize_http_replication -from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory +from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory from synapse.notifier import Notifier from synapse.api.auth import Auth from synapse.handlers import Handlers diff --git a/synapse/state.py b/synapse/state.py index 0acf309fe0..b9a1387520 100644 --- a/synapse/state.py +++ b/synapse/state.py @@ -63,7 +63,7 @@ class StateHandler(object): cache_name="state_cache", clock=self.clock, max_len=SIZE_OF_CACHE, - expiry_ms=EVICTION_TIMEOUT_SECONDS*1000, + expiry_ms=EVICTION_TIMEOUT_SECONDS * 1000, reset_expiry_on_get=True, ) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index c91c7a3729..5a9e7720d9 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -59,7 +59,7 @@ logger = logging.getLogger(__name__) # Number of msec of granularity to store the user IP 'last seen' time. Smaller # times give more inserts into the database even for readonly API hits # 120 seconds == 2 minutes -LAST_SEEN_GRANULARITY = 120*1000 +LAST_SEEN_GRANULARITY = 120 * 1000 class DataStore(RoomMemberStore, RoomStore, diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 5e77320540..cfb87d9328 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -185,7 +185,7 @@ class SQLBaseStore(object): time_then = self._previous_loop_ts self._previous_loop_ts = time_now - ratio = (curr - prev)/(time_now - time_then) + ratio = (curr - prev) / (time_now - time_then) top_three_counters = self._txn_perf_counters.interval( time_now - time_then, limit=3 @@ -643,7 +643,10 @@ class SQLBaseStore(object): if not iterable: defer.returnValue(results) - chunks = [iterable[i:i+batch_size] for i in xrange(0, len(iterable), batch_size)] + chunks = [ + iterable[i:i + batch_size] + for i in xrange(0, len(iterable), batch_size) + ] for chunk in chunks: rows = yield self.runInteraction( desc, diff --git a/synapse/storage/engines/sqlite3.py b/synapse/storage/engines/sqlite3.py index 400c10103c..91fac33b8b 100644 --- a/synapse/storage/engines/sqlite3.py +++ b/synapse/storage/engines/sqlite3.py @@ -54,7 +54,7 @@ class Sqlite3Engine(object): def _parse_match_info(buf): bufsize = len(buf) - return [struct.unpack('@I', buf[i:i+4])[0] for i in range(0, bufsize, 4)] + return [struct.unpack('@I', buf[i:i + 4])[0] for i in range(0, bufsize, 4)] def _rank(raw_match_info): diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 5f32eec6f8..ce2c794025 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -58,7 +58,7 @@ class EventFederationStore(SQLBaseStore): new_front = set() front_list = list(front) chunks = [ - front_list[x:x+100] + front_list[x:x + 100] for x in xrange(0, len(front), 100) ] for chunk in chunks: diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 5e85552029..4d7cdd00d0 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -84,7 +84,7 @@ class EventsStore(SQLBaseStore): 
event.internal_metadata.stream_ordering = stream chunks = [ - events_and_contexts[x:x+100] + events_and_contexts[x:x + 100] for x in xrange(0, len(events_and_contexts), 100) ] @@ -740,7 +740,7 @@ class EventsStore(SQLBaseStore): rows = [] N = 200 for i in range(1 + len(events) / N): - evs = events[i*N:(i + 1)*N] + evs = events[i * N:(i + 1) * N] if not evs: break @@ -755,7 +755,7 @@ class EventsStore(SQLBaseStore): " LEFT JOIN rejections as rej USING (event_id)" " LEFT JOIN redactions as r ON e.event_id = r.redacts" " WHERE e.event_id IN (%s)" - ) % (",".join(["?"]*len(evs)),) + ) % (",".join(["?"] * len(evs)),) txn.execute(sql, evs) rows.extend(self.cursor_to_dict(txn)) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 338a9d40d5..2c49a5e499 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -168,7 +168,7 @@ class StreamStore(SQLBaseStore): results = {} room_ids = list(room_ids) - for rm_ids in (room_ids[i:i+20] for i in xrange(0, len(room_ids), 20)): + for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)): res = yield defer.gatherResults([ self.get_room_events_stream_for_room( room_id, from_key, to_key, limit diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index f1fe963adf..7566d9eb33 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -46,7 +46,7 @@ class Clock(object): def looping_call(self, f, msec): l = task.LoopingCall(f) - l.start(msec/1000.0, now=False) + l.start(msec / 1000.0, now=False) return l def stop_looping_call(self, loop): diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 88e56e3302..e27917c63a 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -149,7 +149,7 @@ class CacheDescriptor(object): self.lru = lru self.tree = tree - self.arg_names = inspect.getargspec(orig).args[1:num_args+1] + self.arg_names = inspect.getargspec(orig).args[1:num_args + 1] if len(self.arg_names) < self.num_args: raise Exception( @@ -250,7 +250,7 @@ class CacheListDescriptor(object): self.num_args = num_args self.list_name = list_name - self.arg_names = inspect.getargspec(orig).args[1:num_args+1] + self.arg_names = inspect.getargspec(orig).args[1:num_args + 1] self.list_pos = self.arg_names.index(self.list_name) self.cache = cache diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 494226f5ea..62cae99649 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -55,7 +55,7 @@ class ExpiringCache(object): def f(): self._prune_cache() - self._clock.looping_call(f, self._expiry_ms/2) + self._clock.looping_call(f, self._expiry_ms / 2) def __setitem__(self, key, value): now = self._clock.time_msec() diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index 29d02f7e95..03bc1401b7 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -58,7 +58,7 @@ class TreeCache(object): if n: break - node_and_keys[i+1][0].pop(k) + node_and_keys[i + 1][0].pop(k) popped, cnt = _strip_and_count_entires(popped) self.size -= cnt diff --git a/synapse/util/logutils.py b/synapse/util/logutils.py index d5b1a37eff..c37a157787 100644 --- a/synapse/util/logutils.py +++ b/synapse/util/logutils.py @@ -111,7 +111,7 @@ def time_function(f): _log_debug_as_f( f, "[FUNC END] {%s-%d} %f", - (func_name, id, end-start,), + (func_name, id, end - start,), ) return r diff --git a/synapse/util/ratelimitutils.py 
b/synapse/util/ratelimitutils.py index c37d6f12e3..ea321bc6a9 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -163,7 +163,7 @@ class _PerHostRatelimiter(object): "Ratelimit [%s]: sleeping req", id(request_id), ) - ret_defer = sleep(self.sleep_msec/1000.0) + ret_defer = sleep(self.sleep_msec / 1000.0) self.sleeping_requests.add(request_id) -- cgit 1.4.1 From b32121a5d1c833ab3e2c164e642ef982364069e2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 10:30:56 +0000 Subject: Unused import --- synapse/storage/room.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 1b6311f332..dc09a3aaba 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.errors import StoreError from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cachedInlineCallbacks, cached +from synapse.util.caches.descriptors import cachedInlineCallbacks from .engines import PostgresEngine, Sqlite3Engine import collections -- cgit 1.4.1 From 771528ab1323715271b9e968d2d337b88910fb2f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 10:50:49 +0000 Subject: Change event_push_actions_rm_tokens schema --- synapse/handlers/sync.py | 6 +-- synapse/push/__init__.py | 2 +- synapse/storage/event_push_actions.py | 47 ++++++++++++++++-------- synapse/storage/prepare_database.py | 2 +- synapse/storage/schema/delta/29/push_actions.sql | 31 ++++++++++++++++ 5 files changed, 67 insertions(+), 21 deletions(-) create mode 100644 synapse/storage/schema/delta/29/push_actions.sql (limited to 'synapse/storage') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index dc686db541..0292e06733 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -706,10 +706,8 @@ class SyncHandler(BaseHandler): ) if notifs is not None: - unread_notifications["notification_count"] = len(notifs) - unread_notifications["highlight_count"] = len([ - 1 for notif in notifs if _action_has_highlight(notif["actions"]) - ]) + unread_notifications["notification_count"] = notifs["notify_count"] + unread_notifications["highlight_count"] = notifs["highlight_count"] logger.debug("Room sync: %r", room_sync) diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 9bc0b356f4..8b9d0f03e5 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -316,7 +316,7 @@ class Pusher(object): r.room_id, self.user_id, last_unread_event_id ) ) - badge += len(notifs) + badge += notifs["notify_count"] defer.returnValue(badge) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index a05c4f84cf..aca3219206 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -37,7 +37,11 @@ class EventPushActionsStore(SQLBaseStore): 'event_id': event.event_id, 'user_id': uid, 'profile_tag': profile_tag, - 'actions': json.dumps(actions) + 'actions': json.dumps(actions), + 'stream_ordering': event.internal_metadata.stream_ordering, + 'topological_ordering': event.depth, + 'notif': 1, + 'highlight': 1 if _action_has_highlight(actions) else 0, }) def f(txn): @@ -74,26 +78,28 @@ class EventPushActionsStore(SQLBaseStore): topological_ordering = results[0][1] sql = ( - "SELECT ea.event_id, ea.actions" - " FROM event_push_actions ea, events e" - " WHERE ea.room_id = e.room_id" - " AND ea.event_id = e.event_id" - " AND 
ea.user_id = ?" - " AND ea.room_id = ?" + "SELECT sum(notif), sum(highlight)" + " FROM event_push_actions ea" + " WHERE" + " user_id = ?" + " AND room_id = ?" " AND (" - " e.topological_ordering > ?" - " OR (e.topological_ordering = ? AND e.stream_ordering > ?)" + " topological_ordering > ?" + " OR (topological_ordering = ? AND stream_ordering > ?)" ")" ) txn.execute(sql, ( user_id, room_id, topological_ordering, topological_ordering, stream_ordering - ) - ) - return [ - {"event_id": row[0], "actions": json.loads(row[1])} - for row in txn.fetchall() - ] + )) + row = txn.fetchone() + if row: + return { + "notify_count": row[0] or 0, + "highlight_count": row[1] or 0, + } + else: + return {"notify_count": 0, "highlight_count": 0} ret = yield self.runInteraction( "get_unread_event_push_actions_by_room", @@ -117,3 +123,14 @@ class EventPushActionsStore(SQLBaseStore): "remove_push_actions_for_event_id", f ) + + +def _action_has_highlight(actions): + for action in actions: + try: + if action.get("set_tweak", None) == "highlight": + return action.get("value", True) + except AttributeError: + pass + + return False diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c1f5f99789..d782b8e25b 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 28 +SCHEMA_VERSION = 29 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/schema/delta/29/push_actions.sql b/synapse/storage/schema/delta/29/push_actions.sql new file mode 100644 index 0000000000..7e7b09820a --- /dev/null +++ b/synapse/storage/schema/delta/29/push_actions.sql @@ -0,0 +1,31 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +ALTER TABLE event_push_actions ADD COLUMN topological_ordering BIGINT; +ALTER TABLE event_push_actions ADD COLUMN stream_ordering BIGINT; +ALTER TABLE event_push_actions ADD COLUMN notif SMALLINT; +ALTER TABLE event_push_actions ADD COLUMN highlight SMALLINT; + +UPDATE event_push_actions SET stream_ordering = ( + SELECT stream_ordering FROM events WHERE event_id = event_push_actions.event_id +), topological_ordering = ( + SELECT topological_ordering FROM events WHERE event_id = event_push_actions.event_id +); + +UPDATE event_push_actions SET notif = 1, highlight = 0; + +CREATE INDEX event_push_actions_rm_tokens on event_push_actions( + user_id, room_id, topological_ordering, stream_ordering +); -- cgit 1.4.1 From f8aae79a72e462f4af65a22d0665192867522174 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 13:23:32 +0000 Subject: Simplify get_rooms --- synapse/app/homeserver.py | 4 +-- synapse/storage/room.py | 84 ++++------------------------------------------ tests/storage/test_room.py | 26 -------------- 3 files changed, 9 insertions(+), 105 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index c3066d6a0d..0a6a19033d 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -674,8 +674,8 @@ def run(hs): stats["uptime_seconds"] = uptime stats["total_users"] = yield hs.get_datastore().count_all_users() - all_rooms = yield hs.get_datastore().get_rooms(False) - stats["total_room_count"] = len(all_rooms) + room_count = yield hs.get_datastore().get_room_count() + stats["total_room_count"] = room_count stats["daily_active_users"] = yield hs.get_datastore().count_daily_users() daily_messages = yield hs.get_datastore().count_daily_messages() diff --git a/synapse/storage/room.py b/synapse/storage/room.py index dc09a3aaba..46ab38a313 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -87,90 +87,20 @@ class RoomStore(SQLBaseStore): desc="get_public_room_ids", ) - @defer.inlineCallbacks - def get_rooms(self, is_public): - """Retrieve a list of all public rooms. - - Args: - is_public (bool): True if the rooms returned should be public. - Returns: - A list of room dicts containing at least a "room_id" key, a - "topic" key if one is set, and a "name" key if one is set + def get_room_count(self): + """Retrieve a list of all rooms """ def f(txn): - def subquery(table_name, column_name=None): - column_name = column_name or table_name - return ( - "SELECT %(table_name)s.event_id as event_id, " - "%(table_name)s.room_id as room_id, %(column_name)s " - "FROM %(table_name)s " - "INNER JOIN current_state_events as c " - "ON c.event_id = %(table_name)s.event_id " % { - "column_name": column_name, - "table_name": table_name, - } - ) - - sql = ( - "SELECT" - " r.room_id," - " max(n.name)," - " max(t.topic)," - " max(v.history_visibility)," - " max(g.guest_access)" - " FROM rooms AS r" - " LEFT JOIN (%(topic)s) AS t ON t.room_id = r.room_id" - " LEFT JOIN (%(name)s) AS n ON n.room_id = r.room_id" - " LEFT JOIN (%(history_visibility)s) AS v ON v.room_id = r.room_id" - " LEFT JOIN (%(guest_access)s) AS g ON g.room_id = r.room_id" - " WHERE r.is_public = ?" 
- " GROUP BY r.room_id" % { - "topic": subquery("topics", "topic"), - "name": subquery("room_names", "name"), - "history_visibility": subquery("history_visibility"), - "guest_access": subquery("guest_access"), - } - ) - - txn.execute(sql, (is_public,)) - - rows = txn.fetchall() - - for i, row in enumerate(rows): - room_id = row[0] - aliases = self._simple_select_onecol_txn( - txn, - table="room_aliases", - keyvalues={ - "room_id": room_id - }, - retcol="room_alias", - ) + sql = "SELECT count(*) FROM rooms" + txn.execute(sql) + row = txn.fetchone() + return row[0] or 0 - rows[i] = list(row) + [aliases] - - return rows - - rows = yield self.runInteraction( + return self.runInteraction( "get_rooms", f ) - ret = [ - { - "room_id": r[0], - "name": r[1], - "topic": r[2], - "world_readable": r[3] == "world_readable", - "guest_can_join": r[4] == "can_join", - "aliases": r[5], - } - for r in rows - if r[5] # We only return rooms that have at least one alias. - ] - - defer.returnValue(ret) - def _store_room_topic_txn(self, txn, event): if hasattr(event, "content") and "topic" in event.content: self._simple_insert_txn( diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py index 7fdbfc60f1..0baaf3df21 100644 --- a/tests/storage/test_room.py +++ b/tests/storage/test_room.py @@ -51,32 +51,6 @@ class RoomStoreTestCase(unittest.TestCase): (yield self.store.get_room(self.room.to_string())) ) - @defer.inlineCallbacks - def test_get_rooms(self): - # get_rooms does an INNER JOIN on the room_aliases table :( - - rooms = yield self.store.get_rooms(is_public=True) - # Should be empty before we add the alias - self.assertEquals([], rooms) - - yield self.store.create_room_alias_association( - room_alias=self.alias, - room_id=self.room.to_string(), - servers=["test"] - ) - - rooms = yield self.store.get_rooms(is_public=True) - - self.assertEquals(1, len(rooms)) - self.assertEquals({ - "name": None, - "room_id": self.room.to_string(), - "topic": None, - "aliases": [self.alias.to_string()], - "world_readable": False, - "guest_can_join": False, - }, rooms[0]) - class RoomEventsStoreTestCase(unittest.TestCase): -- cgit 1.4.1 From b84d59c5f01914fe53d2673c5c7e372f5c61d088 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 16:22:35 +0000 Subject: Add descriptions --- synapse/storage/appservice.py | 3 ++- synapse/storage/keys.py | 1 + synapse/storage/registration.py | 1 + synapse/storage/stream.py | 1 + 4 files changed, 5 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index b5aa55c0a3..1100c67714 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -276,7 +276,8 @@ class ApplicationServiceTransactionStore(SQLBaseStore): "application_services_state", dict(as_id=service.id), ["state"], - allow_none=True + allow_none=True, + desc="get_appservice_state", ) if result: defer.returnValue(result.get("state")) diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 8022b8cfc6..fd05bfe54e 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -39,6 +39,7 @@ class KeyStore(SQLBaseStore): table="server_tls_certificates", keyvalues={"server_name": server_name}, retcols=("tls_certificate",), + desc="get_server_certificate", ) tls_certificate = OpenSSL.crypto.load_certificate( OpenSSL.crypto.FILETYPE_ASN1, tls_certificate_bytes, diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 70cde0d04d..bd35e19be6 100644 --- 
a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -134,6 +134,7 @@ class RegistrationStore(SQLBaseStore): }, retcols=["name", "password_hash", "is_guest"], allow_none=True, + desc="get_user_by_id", ) def get_users_by_id_case_insensitive(self, user_id): diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 2c49a5e499..50436cb2d2 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -564,6 +564,7 @@ class StreamStore(SQLBaseStore): table="events", keyvalues={"event_id": event_id}, retcols=("stream_ordering", "topological_ordering"), + desc="get_topological_token_for_event", ).addCallback(lambda row: "t%d-%d" % ( row["topological_ordering"], row["stream_ordering"],) ) -- cgit 1.4.1 From aa4af94c69b8b1c263dacfce0358aaef97d3e323 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 16:29:32 +0000 Subject: We return dicts now. --- synapse/storage/event_push_actions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index aca3219206..2742e0c008 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -72,7 +72,7 @@ class EventPushActionsStore(SQLBaseStore): ) results = txn.fetchall() if len(results) == 0: - return [] + return {} stream_ordering = results[0][0] topological_ordering = results[0][1] -- cgit 1.4.1 From 4d36e732307ad35eb070af384058f227d7d85dd0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 16:35:00 +0000 Subject: Actually return something sensible --- synapse/storage/event_push_actions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 2742e0c008..d0a969f50b 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -72,7 +72,7 @@ class EventPushActionsStore(SQLBaseStore): ) results = txn.fetchall() if len(results) == 0: - return {} + return {"notify_count": 0, "highlight_count": 0} stream_ordering = results[0][0] topological_ordering = results[0][1] -- cgit 1.4.1 From 79a1c0574b33955d28bfb12697ccd5a7be779b36 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Fri, 5 Feb 2016 11:22:30 +0000 Subject: Allocate guest user IDs numerically The current random IDs are ugly and confusing when presented in UIs. This makes them prettier and easier to read. Also, disable non-automated registration of numeric IDs so that we don't need to worry so much about people carving out our automated address space and us needing to keep retrying ID registration.
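As a hedged illustration of the strategy this commit introduces, here is a simplified, standalone version of the first-free-gap scan that the new find_next_generated_user_id_localpart performs in the diff below (the helper name and the in-memory list of user IDs are assumptions of the sketch; the real code reads the users table inside a transaction):

import re

def first_free_numeric_localpart(user_ids):
    # Collect the numeric localparts already taken, e.g. "@123:hs" -> 123.
    taken = set()
    pattern = re.compile(r"^@(\d+):")
    for user_id in user_ids:
        match = pattern.search(user_id)
        if match:
            taken.add(int(match.group(1)))
    # First gap wins: reuse holes left by earlier registrations rather
    # than minting ever-longer IDs.
    i = 0
    while i in taken:
        i += 1
    return i

# first_free_numeric_localpart(["@0:hs", "@1:hs", "@5:hs"]) == 2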
--- synapse/handlers/register.py | 55 +++++++++++++++++++++++++++-------------- synapse/storage/registration.py | 36 +++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 19 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index b8fbcf9233..2660fd21a2 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -21,7 +21,6 @@ from synapse.api.errors import ( AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError ) from ._base import BaseHandler -import synapse.util.stringutils as stringutils from synapse.util.async import run_on_reactor from synapse.http.client import CaptchaServerHttpClient @@ -45,6 +44,8 @@ class RegistrationHandler(BaseHandler): self.distributor.declare("registered_user") self.captcha_client = CaptchaServerHttpClient(hs) + self._next_generated_user_id = None + @defer.inlineCallbacks def check_username(self, localpart, guest_access_token=None): yield run_on_reactor() @@ -91,7 +92,7 @@ class RegistrationHandler(BaseHandler): Args: localpart : The local part of the user ID to register. If None, - one will be randomly generated. + one will be generated. password (str) : The password to assign to this user so they can login again. This can be None which means they cannot login again via a password (e.g. the user is an application service user). @@ -108,6 +109,18 @@ class RegistrationHandler(BaseHandler): if localpart: yield self.check_username(localpart, guest_access_token=guest_access_token) + was_guest = guest_access_token is not None + + if not was_guest: + try: + int(localpart) + raise RegistrationError( + 400, + "Numeric user IDs are reserved for guest users." + ) + except ValueError: + pass + user = UserID(localpart, self.hs.hostname) user_id = user.to_string() @@ -118,40 +131,36 @@ class RegistrationHandler(BaseHandler): user_id=user_id, token=token, password_hash=password_hash, - was_guest=guest_access_token is not None, + was_guest=was_guest, make_guest=make_guest, ) yield registered_user(self.distributor, user) else: - # autogen a random user ID + # autogen a sequential user ID attempts = 0 - user_id = None token = None - while not user_id: + user = None + while not user: + localpart = yield self._generate_user_id(attempts > 0) + user = UserID(localpart, self.hs.hostname) + user_id = user.to_string() + yield self.check_user_id_is_valid(user_id) + if generate_token: + token = self.auth_handler().generate_access_token(user_id) try: - localpart = self._generate_user_id() - user = UserID(localpart, self.hs.hostname) - user_id = user.to_string() - yield self.check_user_id_is_valid(user_id) - if generate_token: - token = self.auth_handler().generate_access_token(user_id) yield self.store.register( user_id=user_id, token=token, password_hash=password_hash, make_guest=make_guest ) - - yield registered_user(self.distributor, user) except SynapseError: # if user id is taken, just generate another user_id = None token = None attempts += 1 - if attempts > 5: - raise RegistrationError( - 500, "Cannot generate user ID.") + yield registered_user(self.distributor, user) # We used to generate default identicons here, but nowadays # we want clients to generate their own as part of their branding @@ -283,8 +292,16 @@ class RegistrationHandler(BaseHandler): errcode=Codes.EXCLUSIVE ) - def _generate_user_id(self): - return "-" + stringutils.random_string(18) + @defer.inlineCallbacks + def _generate_user_id(self, reseed=False): + if reseed or self._next_generated_user_id is 
None: + self._next_generated_user_id = ( + yield self.store.find_next_generated_user_id_localpart() + ) + + id = self._next_generated_user_id + self._next_generated_user_id += 1 + defer.returnValue(str(id)) @defer.inlineCallbacks def _validate_captcha(self, ip_addr, private_key, challenge, response): diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index bd35e19be6..967c732bda 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import re + from twisted.internet import defer from synapse.api.errors import StoreError, Codes @@ -351,3 +353,37 @@ class RegistrationStore(SQLBaseStore): ret = yield self.runInteraction("count_users", _count_users) defer.returnValue(ret) + + @defer.inlineCallbacks + def find_next_generated_user_id_localpart(self): + """ + Gets the localpart of the next generated user ID. + + Generated user IDs are integers, and we aim for them to be as small as + we can. Unfortunately, it's possible some of them are already taken by + existing users, and there may be gaps in the already taken range. This + function returns the start of the first allocatable gap. This is to + avoid the case of ID 10000000 being pre-allocated, so us wasting the + first (and shortest) many generated user IDs. + """ + def _find_next_generated_user_id(txn): + txn.execute("SELECT name FROM users") + rows = self.cursor_to_dict(txn) + + regex = re.compile("^@(\d+):") + + found = set() + + for r in rows: + user_id = r["name"] + match = regex.search(user_id) + if match: + found.add(int(match.group(1))) + for i in xrange(len(found) + 1): + if i not in found: + return i + + defer.returnValue((yield self.runInteraction( + "find_next_generated_user_id", + _find_next_generated_user_id + ))) -- cgit 1.4.1 From 2c1fbea5319db2c64fa486adb32b5e66680b6daf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Feb 2016 10:22:44 +0000 Subject: Fix up logcontexts --- synapse/api/auth.py | 4 +- synapse/app/homeserver.py | 2 + synapse/crypto/keyring.py | 83 ++++++++++++----------- synapse/federation/federation_server.py | 4 +- synapse/federation/transaction_queue.py | 3 - synapse/handlers/_base.py | 10 +-- synapse/handlers/events.py | 11 +++- synapse/handlers/federation.py | 50 ++------------ synapse/handlers/presence.py | 20 +++--- synapse/handlers/register.py | 2 +- synapse/handlers/room.py | 11 +++- synapse/handlers/sync.py | 40 ++++++------ synapse/http/server.py | 5 +- synapse/notifier.py | 58 ++++++++-------- synapse/push/__init__.py | 2 +- synapse/push/pusherpool.py | 9 +-- synapse/rest/client/v2_alpha/account_data.py | 4 +- synapse/rest/client/v2_alpha/tags.py | 4 +- synapse/storage/_base.py | 18 ++--- synapse/storage/events.py | 34 ++++++---- synapse/storage/presence.py | 5 +- synapse/storage/stream.py | 9 +-- synapse/util/__init__.py | 6 +- synapse/util/async.py | 11 +++- synapse/util/caches/descriptors.py | 16 +++-- synapse/util/caches/snapshot_cache.py | 3 +- synapse/util/distributor.py | 15 +++-- synapse/util/logcontext.py | 98 ++++++++++++++++++++++++++-- synapse/util/logutils.py | 35 ++++++++++ synapse/util/metrics.py | 10 +-- synapse/util/ratelimitutils.py | 3 +- 31 files changed, 356 insertions(+), 229 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 5bba9343f6..e2f84c4d57 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -24,6 +24,7 @@ from 
synapse.api.constants import EventTypes, Membership, JoinRules from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError from synapse.types import Requester, RoomID, UserID, EventID from synapse.util.logutils import log_function +from synapse.util.logcontext import preserve_context_over_fn from unpaddedbase64 import decode_base64 import logging @@ -529,7 +530,8 @@ class Auth(object): default=[""] )[0] if user and access_token and ip_addr: - self.store.insert_client_ip( + preserve_context_over_fn( + self.store.insert_client_ip, user=user, access_token=access_token, ip=ip_addr, diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e5c7e39cf9..2b4be7bdd0 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -709,6 +709,8 @@ def run(hs): phone_home_task.start(60 * 60 * 24, now=False) def in_thread(): + # Uncomment to enable tracing of log context changes. + # sys.settrace(logcontext_tracer) with LoggingContext("run"): change_resource_limit(hs.config.soft_file_limit) reactor.run() diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index cddec0b2bc..d08ee0aa91 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -18,6 +18,10 @@ from synapse.api.errors import SynapseError, Codes from synapse.util.retryutils import get_retry_limiter from synapse.util import unwrapFirstError from synapse.util.async import ObservableDeferred +from synapse.util.logcontext import ( + preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext, + preserve_fn +) from twisted.internet import defer @@ -142,40 +146,43 @@ class Keyring(object): for server_name, _ in server_and_json } - # We want to wait for any previous lookups to complete before - # proceeding. - wait_on_deferred = self.wait_for_previous_lookups( - [server_name for server_name, _ in server_and_json], - server_to_deferred, - ) + with PreserveLoggingContext(): - # Actually start fetching keys. - wait_on_deferred.addBoth( - lambda _: self.get_server_verify_keys(group_id_to_group, deferreds) - ) + # We want to wait for any previous lookups to complete before + # proceeding. + wait_on_deferred = self.wait_for_previous_lookups( + [server_name for server_name, _ in server_and_json], + server_to_deferred, + ) - # When we've finished fetching all the keys for a given server_name, - # resolve the deferred passed to `wait_for_previous_lookups` so that - # any lookups waiting will proceed. - server_to_gids = {} + # Actually start fetching keys. + wait_on_deferred.addBoth( + lambda _: self.get_server_verify_keys(group_id_to_group, deferreds) + ) + + # When we've finished fetching all the keys for a given server_name, + # resolve the deferred passed to `wait_for_previous_lookups` so that + # any lookups waiting will proceed. 
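# (Aside -- the bookkeeping being reindented here fires one "all keys for
# this server are fetched" Deferred once its set of outstanding group ids
# drains. A minimal, runnable reduction of that pattern; the names below
# are illustrative, not the keyring's own:)

from twisted.internet import defer

def completion_tracker(items):
    pending = set(items)
    done = defer.Deferred()

    def finished(result, item):
        # Hook with d.addBoth(finished, item) on each per-item Deferred:
        # drop the item, fire `done` on the last one, and pass the result
        # through so the original callback chain is undisturbed.
        pending.discard(item)
        if not pending and not done.called:
            done.callback(None)
        return result

    return done, finished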
+ server_to_gids = {} - def remove_deferreds(res, server_name, group_id): - server_to_gids[server_name].discard(group_id) - if not server_to_gids[server_name]: - d = server_to_deferred.pop(server_name, None) - if d: - d.callback(None) - return res + def remove_deferreds(res, server_name, group_id): + server_to_gids[server_name].discard(group_id) + if not server_to_gids[server_name]: + d = server_to_deferred.pop(server_name, None) + if d: + d.callback(None) + return res - for g_id, deferred in deferreds.items(): - server_name = group_id_to_group[g_id].server_name - server_to_gids.setdefault(server_name, set()).add(g_id) - deferred.addBoth(remove_deferreds, server_name, g_id) + for g_id, deferred in deferreds.items(): + server_name = group_id_to_group[g_id].server_name + server_to_gids.setdefault(server_name, set()).add(g_id) + deferred.addBoth(remove_deferreds, server_name, g_id) # Pass those keys to handle_key_deferred so that the json object # signatures can be verified return [ - handle_key_deferred( + preserve_context_over_fn( + handle_key_deferred, group_id_to_group[g_id], deferreds[g_id], ) @@ -198,12 +205,13 @@ class Keyring(object): if server_name in self.key_downloads ] if wait_on: - yield defer.DeferredList(wait_on) + with PreserveLoggingContext(): + yield defer.DeferredList(wait_on) else: break for server_name, deferred in server_to_deferred.items(): - d = ObservableDeferred(deferred) + d = ObservableDeferred(preserve_context_over_deferred(deferred)) self.key_downloads[server_name] = d def rm(r, server_name): @@ -244,12 +252,13 @@ class Keyring(object): for group in group_id_to_group.values(): for key_id in group.key_ids: if key_id in merged_results[group.server_name]: - group_id_to_deferred[group.group_id].callback(( - group.group_id, - group.server_name, - key_id, - merged_results[group.server_name][key_id], - )) + with PreserveLoggingContext(): + group_id_to_deferred[group.group_id].callback(( + group.group_id, + group.server_name, + key_id, + merged_results[group.server_name][key_id], + )) break else: missing_groups.setdefault( @@ -504,7 +513,7 @@ class Keyring(object): yield defer.gatherResults( [ - self.store_keys( + preserve_fn(self.store_keys)( server_name=key_server_name, from_server=server_name, verify_keys=verify_keys, @@ -573,7 +582,7 @@ class Keyring(object): yield defer.gatherResults( [ - self.store.store_server_keys_json( + preserve_fn(self.store.store_server_keys_json)( server_name=server_name, key_id=key_id, from_server=server_name, @@ -675,7 +684,7 @@ class Keyring(object): # TODO(markjh): Store whether the keys have expired. 
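# (Aside -- the recurring change in this commit, visible again in the hunk
# below, wraps callables handed to defer.gatherResults in preserve_fn so
# concurrently scheduled work neither loses nor leaks the caller's log
# context. Shape of the pattern as a sketch; store_one() is a hypothetical
# per-key function returning a Deferred:)

from twisted.internet import defer
from synapse.util.logcontext import preserve_fn

@defer.inlineCallbacks
def store_all(store_one, verify_keys):
    # preserve_fn captures the current log context around each call, so
    # the work resumes in the right context when its Deferred fires.
    yield defer.gatherResults(
        [preserve_fn(store_one)(key_id, key)
         for key_id, key in verify_keys.items()],
        consumeErrors=True,
    )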
yield defer.gatherResults( [ - self.store.store_server_verify_key( + preserve_fn(self.store.store_server_verify_key)( server_name, server_name, key.time_added, key ) for key_id, key in verify_keys.items() diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index a97aa0c94a..90718192dd 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -126,10 +126,8 @@ class FederationServer(FederationBase): results = [] for pdu in pdu_list: - d = self._handle_new_pdu(transaction.origin, pdu) - try: - yield d + yield self._handle_new_pdu(transaction.origin, pdu) results.append({}) except FederationError as e: self.send_failure(e, transaction.origin) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 622adad3ae..1928da03b3 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -103,7 +103,6 @@ class TransactionQueue(object): else: return not destination.startswith("localhost") - @defer.inlineCallbacks def enqueue_pdu(self, pdu, destinations, order): # We loop through all destinations to see whether we already have # a transaction in progress. If we do, stick it in the pending_pdus @@ -141,8 +140,6 @@ class TransactionQueue(object): deferreds.append(deferred) - yield defer.DeferredList(deferreds, consumeErrors=True) - # NO inlineCallbacks def enqueue_edu(self, edu): destination = edu.destination diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 1423df6cf3..fa83d3e464 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -293,19 +293,11 @@ class BaseHandler(object): with PreserveLoggingContext(): # Don't block waiting on waking up all the listeners. - notify_d = self.notifier.on_new_room_event( + self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) - def log_failure(f): - logger.warn( - "Failed to notify about %s: %s", - event.event_id, f.value - ) - - notify_d.addErrback(log_failure) - # If invite, remove room_state from unsigned before sending. 
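# (Aside -- in the handlers/_base.py hunk above, notifying listeners stops
# being a yielded Deferred with a log_failure errback and becomes a plain,
# deliberately fire-and-forget call made under PreserveLoggingContext().
# Minimal shape of that change; `notifier` is assumed to exist:)

from synapse.util.logcontext import PreserveLoggingContext

def notify_new_event(notifier, event, stream_id, max_stream_id, extra_users):
    # Enter the sentinel context first so callbacks scheduled by the
    # notifier are not charged to, or blocked on, this request's context.
    with PreserveLoggingContext():
        notifier.on_new_room_event(
            event, stream_id, max_stream_id, extra_users=extra_users
        )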
event.unsigned.pop("invite_room_state", None) diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 5ad8f3779a..4933c31c19 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.util.logutils import log_function from synapse.types import UserID from synapse.events.utils import serialize_event +from synapse.util.logcontext import preserve_context_over_fn from ._base import BaseHandler @@ -29,11 +30,17 @@ logger = logging.getLogger(__name__) def started_user_eventstream(distributor, user): - return distributor.fire("started_user_eventstream", user) + return preserve_context_over_fn( + distributor.fire, + "started_user_eventstream", user + ) def stopped_user_eventstream(distributor, user): - return distributor.fire("stopped_user_eventstream", user) + return preserve_context_over_fn( + distributor.fire, + "stopped_user_eventstream", user + ) class EventStreamHandler(BaseHandler): diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2ce1e9d6c7..b78b0502d9 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -221,19 +221,11 @@ class FederationHandler(BaseHandler): extra_users.append(target_user) with PreserveLoggingContext(): - d = self.notifier.on_new_room_event( + self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) - def log_failure(f): - logger.warn( - "Failed to notify about %s: %s", - event.event_id, f.value - ) - - d.addErrback(log_failure) - if event.type == EventTypes.Member: if event.membership == Membership.JOIN: prev_state = context.current_state.get((event.type, event.state_key)) @@ -643,19 +635,11 @@ class FederationHandler(BaseHandler): ) with PreserveLoggingContext(): - d = self.notifier.on_new_room_event( + self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=[joinee] ) - def log_failure(f): - logger.warn( - "Failed to notify about %s: %s", - event.event_id, f.value - ) - - d.addErrback(log_failure) - logger.debug("Finished joining %s to %s", joinee, room_id) finally: room_queue = self.room_queues[room_id] @@ -730,18 +714,10 @@ class FederationHandler(BaseHandler): extra_users.append(target_user) with PreserveLoggingContext(): - d = self.notifier.on_new_room_event( + self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) - def log_failure(f): - logger.warn( - "Failed to notify about %s: %s", - event.event_id, f.value - ) - - d.addErrback(log_failure) - if event.type == EventTypes.Member: if event.content["membership"] == Membership.JOIN: user = UserID.from_string(event.state_key) @@ -811,19 +787,11 @@ class FederationHandler(BaseHandler): target_user = UserID.from_string(event.state_key) with PreserveLoggingContext(): - d = self.notifier.on_new_room_event( + self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=[target_user], ) - def log_failure(f): - logger.warn( - "Failed to notify about %s: %s", - event.event_id, f.value - ) - - d.addErrback(log_failure) - defer.returnValue(event) @defer.inlineCallbacks @@ -948,18 +916,10 @@ class FederationHandler(BaseHandler): extra_users.append(target_user) with PreserveLoggingContext(): - d = self.notifier.on_new_room_event( + self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) - def log_failure(f): - logger.warn( - "Failed to notify about %s: %s", - event.event_id, f.value - ) - - 
d.addErrback(log_failure) - new_pdu = event destinations = set() diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index d0c21ff5c9..b61394f2b5 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -378,9 +378,9 @@ class PresenceHandler(BaseHandler): was_polling = target_user in self._user_cachemap if now_online and not was_polling: - self.start_polling_presence(target_user, state=state) + yield self.start_polling_presence(target_user, state=state) elif not now_online and was_polling: - self.stop_polling_presence(target_user) + yield self.stop_polling_presence(target_user) # TODO(paul): perform a presence push as part of start/stop poll so # we don't have to do this all the time @@ -394,7 +394,8 @@ class PresenceHandler(BaseHandler): if now - prev_state.state.get("last_active", 0) < LAST_ACTIVE_GRANULARITY: return - self.changed_presencelike_data(user, {"last_active": now}) + with PreserveLoggingContext(): + self.changed_presencelike_data(user, {"last_active": now}) def get_joined_rooms_for_user(self, user): """Get the list of rooms a user is joined to. @@ -466,11 +467,12 @@ class PresenceHandler(BaseHandler): local_user, room_ids=[room_id], add_to_cache=False ) - self.push_update_to_local_and_remote( - observed_user=local_user, - users_to_push=[user], - statuscache=statuscache, - ) + with PreserveLoggingContext(): + self.push_update_to_local_and_remote( + observed_user=local_user, + users_to_push=[user], + statuscache=statuscache, + ) @defer.inlineCallbacks def send_presence_invite(self, observer_user, observed_user): @@ -556,7 +558,7 @@ class PresenceHandler(BaseHandler): observer_user.localpart, observed_user.to_string() ) - self.start_polling_presence( + yield self.start_polling_presence( observer_user, target_user=observed_user ) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 2660fd21a2..24c850ae9b 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -186,7 +186,7 @@ class RegistrationHandler(BaseHandler): token=token, password_hash="" ) - registered_user(self.distributor, user) + yield registered_user(self.distributor, user) defer.returnValue((user_id, token)) @defer.inlineCallbacks diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index bfd7e44e9f..a8e3a9029c 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -25,6 +25,7 @@ from synapse.api.constants import ( from synapse.api.errors import AuthError, StoreError, SynapseError, Codes from synapse.util import stringutils, unwrapFirstError from synapse.util.async import run_on_reactor +from synapse.util.logcontext import preserve_context_over_fn from signedjson.sign import verify_signed_json from signedjson.key import decode_verify_key_bytes @@ -46,11 +47,17 @@ def collect_presencelike_data(distributor, user, content): def user_left_room(distributor, user, room_id): - return distributor.fire("user_left_room", user=user, room_id=room_id) + return preserve_context_over_fn( + distributor.fire, + "user_left_room", user=user, room_id=room_id + ) def user_joined_room(distributor, user, room_id): - return distributor.fire("user_joined_room", user=user, room_id=room_id) + return preserve_context_over_fn( + distributor.fire, + "user_joined_room", user=user, room_id=room_id + ) class RoomCreationHandler(BaseHandler): diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 72271f2626..3f1cda5b0b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -18,7 +18,7 @@ from 
._base import BaseHandler from synapse.streams.config import PaginationConfig from synapse.api.constants import Membership, EventTypes from synapse.util import unwrapFirstError -from synapse.util.logcontext import LoggingContext +from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from twisted.internet import defer @@ -241,15 +241,16 @@ class SyncHandler(BaseHandler): deferreds = [] for event in room_list: if event.membership == Membership.JOIN: - room_sync_deferred = self.full_state_sync_for_joined_room( - room_id=event.room_id, - sync_config=sync_config, - now_token=now_token, - timeline_since_token=timeline_since_token, - ephemeral_by_room=ephemeral_by_room, - tags_by_room=tags_by_room, - account_data_by_room=account_data_by_room, - ) + with PreserveLoggingContext(LoggingContext.current_context()): + room_sync_deferred = self.full_state_sync_for_joined_room( + room_id=event.room_id, + sync_config=sync_config, + now_token=now_token, + timeline_since_token=timeline_since_token, + ephemeral_by_room=ephemeral_by_room, + tags_by_room=tags_by_room, + account_data_by_room=account_data_by_room, + ) room_sync_deferred.addCallback(joined.append) deferreds.append(room_sync_deferred) elif event.membership == Membership.INVITE: @@ -262,15 +263,16 @@ class SyncHandler(BaseHandler): leave_token = now_token.copy_and_replace( "room_key", "s%d" % (event.stream_ordering,) ) - room_sync_deferred = self.full_state_sync_for_archived_room( - sync_config=sync_config, - room_id=event.room_id, - leave_event_id=event.event_id, - leave_token=leave_token, - timeline_since_token=timeline_since_token, - tags_by_room=tags_by_room, - account_data_by_room=account_data_by_room, - ) + with PreserveLoggingContext(LoggingContext.current_context()): + room_sync_deferred = self.full_state_sync_for_archived_room( + sync_config=sync_config, + room_id=event.room_id, + leave_event_id=event.event_id, + leave_token=leave_token, + timeline_since_token=timeline_since_token, + tags_by_room=tags_by_room, + account_data_by_room=account_data_by_room, + ) room_sync_deferred.addCallback(archived.append) deferreds.append(room_sync_deferred) diff --git a/synapse/http/server.py b/synapse/http/server.py index 06935783ca..a90e2e1125 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -99,9 +99,8 @@ def request_handler(request_handler): request_context.request = request_id with request.processing(): try: - d = request_handler(self, request) - with PreserveLoggingContext(): - yield d + with PreserveLoggingContext(request_context): + yield request_handler(self, request) except CodeMessageException as e: code = e.code if isinstance(e, SynapseError): diff --git a/synapse/notifier.py b/synapse/notifier.py index 1a90bd55cd..560866b26e 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -18,7 +18,8 @@ from synapse.api.constants import EventTypes from synapse.api.errors import AuthError from synapse.util.logutils import log_function -from synapse.util.async import run_on_reactor, ObservableDeferred +from synapse.util.async import ObservableDeferred +from synapse.util.logcontext import PreserveLoggingContext from synapse.types import StreamToken import synapse.metrics @@ -73,7 +74,8 @@ class _NotifierUserStream(object): self.current_token = current_token self.last_notified_ms = time_now_ms - self.notify_deferred = ObservableDeferred(defer.Deferred()) + with PreserveLoggingContext(): + self.notify_deferred = ObservableDeferred(defer.Deferred()) def notify(self, stream_key, stream_id, time_now_ms): """Notify 
any listeners for this user of a new event from an
@@ -88,8 +90,10 @@ class _NotifierUserStream(object):
         )
         self.last_notified_ms = time_now_ms
         notify_deferred = self.notify_deferred
-        self.notify_deferred = ObservableDeferred(defer.Deferred())
-        notify_deferred.callback(self.current_token)
+
+        with PreserveLoggingContext():
+            self.notify_deferred = ObservableDeferred(defer.Deferred())
+            notify_deferred.callback(self.current_token)

     def remove(self, notifier):
         """ Remove this listener from all the indexes in the Notifier
@@ -184,8 +188,6 @@ class Notifier(object):
             lambda: count(bool, self.appservice_to_user_streams.values()),
         )

-    @log_function
-    @defer.inlineCallbacks
     def on_new_room_event(self, event, room_stream_id, max_room_stream_id,
                           extra_users=[]):
         """ Used by handlers to inform the notifier something has happened
@@ -199,12 +201,11 @@ class Notifier(object):
         until all previous events have been persisted before notifying
         the client streams.
         """
-        yield run_on_reactor()
-
-        self.pending_new_room_events.append((
-            room_stream_id, event, extra_users
-        ))
-        self._notify_pending_new_room_events(max_room_stream_id)
+        with PreserveLoggingContext():
+            self.pending_new_room_events.append((
+                room_stream_id, event, extra_users
+            ))
+            self._notify_pending_new_room_events(max_room_stream_id)

     def _notify_pending_new_room_events(self, max_room_stream_id):
         """Notify for the room events that were queued waiting for a previous
@@ -251,31 +252,29 @@ class Notifier(object):
             extra_streams=app_streams,
         )

-    @defer.inlineCallbacks
-    @log_function
     def on_new_event(self, stream_key, new_token, users=[], rooms=[],
                      extra_streams=set()):
         """ Used to inform listeners that something has happened event wise.

         Will wake up all listeners for the given users and rooms.
         """
-        yield run_on_reactor()
-        user_streams = set()
+        with PreserveLoggingContext():
+            user_streams = set()

-        for user in users:
-            user_stream = self.user_to_user_stream.get(str(user))
-            if user_stream is not None:
-                user_streams.add(user_stream)
+            for user in users:
+                user_stream = self.user_to_user_stream.get(str(user))
+                if user_stream is not None:
+                    user_streams.add(user_stream)

-        for room in rooms:
-            user_streams |= self.room_to_user_streams.get(room, set())
+            for room in rooms:
+                user_streams |= self.room_to_user_streams.get(room, set())

-        time_now_ms = self.clock.time_msec()
-        for user_stream in user_streams:
-            try:
-                user_stream.notify(stream_key, new_token, time_now_ms)
-            except:
-                logger.exception("Failed to notify listener")
+            time_now_ms = self.clock.time_msec()
+            for user_stream in user_streams:
+                try:
+                    user_stream.notify(stream_key, new_token, time_now_ms)
+                except:
+                    logger.exception("Failed to notify listener")

     @defer.inlineCallbacks
     def wait_for_events(self, user_id, timeout, callback, room_ids=None,
@@ -325,7 +324,8 @@ class Notifier(object):
             # that we don't miss any current_token updates.
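            # (The yield on listener.deferred a few lines below runs inside
            # PreserveLoggingContext: while this call is parked waiting for a
            # notification, the sentinel context is active, so the reactor
            # callback that eventually fires the deferred can neither clobber
            # nor be misattributed to this request's log context.)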
prev_token = current_token listener = user_stream.new_listener(prev_token) - yield listener.deferred + with PreserveLoggingContext(): + yield listener.deferred except defer.CancelledError: break diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 64e581b8ba..8da2d8716c 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -111,7 +111,7 @@ class Pusher(object): self.user_id, config, timeout=0, affect_presence=False ) self.last_token = chunk['end'] - self.store.update_pusher_last_token( + yield self.store.update_pusher_last_token( self.app_id, self.pushkey, self.user_id, self.last_token ) logger.info("New pusher %s for user %s starting from token %s", diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index d1b7c0802f..d7dcb2de4b 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -18,6 +18,7 @@ from twisted.internet import defer from httppusher import HttpPusher from synapse.push import PusherConfigException +from synapse.util.logcontext import preserve_fn import logging @@ -76,7 +77,7 @@ class PusherPool: "Removing pusher for app id %s, pushkey %s, user %s", app_id, pushkey, p['user_name'] ) - self.remove_pusher(p['app_id'], p['pushkey'], p['user_name']) + yield self.remove_pusher(p['app_id'], p['pushkey'], p['user_name']) @defer.inlineCallbacks def remove_pushers_by_user(self, user_id): @@ -91,7 +92,7 @@ class PusherPool: "Removing pusher for app id %s, pushkey %s, user %s", p['app_id'], p['pushkey'], p['user_name'] ) - self.remove_pusher(p['app_id'], p['pushkey'], p['user_name']) + yield self.remove_pusher(p['app_id'], p['pushkey'], p['user_name']) @defer.inlineCallbacks def _add_pusher_to_store(self, user_id, access_token, profile_tag, kind, @@ -110,7 +111,7 @@ class PusherPool: lang=lang, data=data, ) - self._refresh_pusher(app_id, pushkey, user_id) + yield self._refresh_pusher(app_id, pushkey, user_id) def _create_pusher(self, pusherdict): if pusherdict['kind'] == 'http': @@ -166,7 +167,7 @@ class PusherPool: if fullid in self.pushers: self.pushers[fullid].stop() self.pushers[fullid] = p - p.start() + preserve_fn(p.start)() logger.info("Started pushers") diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index 985efe2a62..1456881c1a 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -57,7 +57,7 @@ class AccountDataServlet(RestServlet): user_id, account_data_type, body ) - yield self.notifier.on_new_event( + self.notifier.on_new_event( "account_data_key", max_id, users=[user_id] ) @@ -99,7 +99,7 @@ class RoomAccountDataServlet(RestServlet): user_id, room_id, account_data_type, body ) - yield self.notifier.on_new_event( + self.notifier.on_new_event( "account_data_key", max_id, users=[user_id] ) diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index 42f2203f3d..79c436a8cf 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -80,7 +80,7 @@ class TagServlet(RestServlet): max_id = yield self.store.add_tag_to_room(user_id, room_id, tag, body) - yield self.notifier.on_new_event( + self.notifier.on_new_event( "account_data_key", max_id, users=[user_id] ) @@ -94,7 +94,7 @@ class TagServlet(RestServlet): max_id = yield self.store.remove_tag_from_room(user_id, room_id, tag) - yield self.notifier.on_new_event( + self.notifier.on_new_event( "account_data_key", max_id, users=[user_id] ) diff --git a/synapse/storage/_base.py 
b/synapse/storage/_base.py index cfb87d9328..2e97ac84a8 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -15,7 +15,7 @@ import logging from synapse.api.errors import StoreError -from synapse.util.logcontext import preserve_context_over_fn, LoggingContext +from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from synapse.util.caches.dictionary_cache import DictionaryCache from synapse.util.caches.descriptors import Cache import synapse.metrics @@ -298,10 +298,10 @@ class SQLBaseStore(object): func, *args, **kwargs ) - result = yield preserve_context_over_fn( - self._db_pool.runWithConnection, - inner_func, *args, **kwargs - ) + with PreserveLoggingContext(): + result = yield self._db_pool.runWithConnection( + inner_func, *args, **kwargs + ) for after_callback, after_args in after_callbacks: after_callback(*after_args) @@ -326,10 +326,10 @@ class SQLBaseStore(object): return func(conn, *args, **kwargs) - result = yield preserve_context_over_fn( - self._db_pool.runWithConnection, - inner_func, *args, **kwargs - ) + with PreserveLoggingContext(): + result = yield self._db_pool.runWithConnection( + inner_func, *args, **kwargs + ) defer.returnValue(result) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 4d7cdd00d0..c6ed54721c 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -19,7 +19,7 @@ from twisted.internet import defer, reactor from synapse.events import FrozenEvent, USE_FROZEN_DICTS from synapse.events.utils import prune_event -from synapse.util.logcontext import preserve_context_over_deferred +from synapse.util.logcontext import preserve_fn, PreserveLoggingContext from synapse.util.logutils import log_function from synapse.api.constants import EventTypes @@ -664,14 +664,16 @@ class EventsStore(SQLBaseStore): for ids, d in lst: if not d.called: try: - d.callback([ - res[i] - for i in ids - if i in res - ]) + with PreserveLoggingContext(): + d.callback([ + res[i] + for i in ids + if i in res + ]) except: logger.exception("Failed to callback") - reactor.callFromThread(fire, event_list, row_dict) + with PreserveLoggingContext(): + reactor.callFromThread(fire, event_list, row_dict) except Exception as e: logger.exception("do_fetch") @@ -679,10 +681,12 @@ class EventsStore(SQLBaseStore): def fire(evs): for _, d in evs: if not d.called: - d.errback(e) + with PreserveLoggingContext(): + d.errback(e) if event_list: - reactor.callFromThread(fire, event_list) + with PreserveLoggingContext(): + reactor.callFromThread(fire, event_list) @defer.inlineCallbacks def _enqueue_events(self, events, check_redacted=True, @@ -709,18 +713,20 @@ class EventsStore(SQLBaseStore): should_start = False if should_start: - self.runWithConnection( - self._do_fetch - ) + with PreserveLoggingContext(): + self.runWithConnection( + self._do_fetch + ) - rows = yield preserve_context_over_deferred(events_d) + with PreserveLoggingContext(): + rows = yield events_d if not allow_rejected: rows[:] = [r for r in rows if not r["rejects"]] res = yield defer.gatherResults( [ - self._get_event_from_row( + preserve_fn(self._get_event_from_row)( row["internal_metadata"], row["json"], row["redacts"], check_redacted=check_redacted, get_prev_content=get_prev_content, diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 9b3aecaf8c..ef525f34c5 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -68,8 +68,9 @@ class PresenceStore(SQLBaseStore): for row in rows }) + @defer.inlineCallbacks def 
set_presence_state(self, user_localpart, new_state):
-        res = self._simple_update_one(
+        res = yield self._simple_update_one(
             table="presence",
             keyvalues={"user_id": user_localpart},
             updatevalues={"state": new_state["state"],
@@ -79,7 +80,7 @@ class PresenceStore(SQLBaseStore):
         )

         self.get_presence_state.invalidate((user_localpart,))
-        return res
+        defer.returnValue(res)

     def allow_presence_visible(self, observed_localpart, observer_userid):
         return self._simple_insert(
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 50436cb2d2..367ffc9543 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -39,6 +39,7 @@ from ._base import SQLBaseStore
 from synapse.util.caches.descriptors import cachedInlineCallbacks
 from synapse.api.constants import EventTypes
 from synapse.types import RoomStreamToken
+from synapse.util.logcontext import preserve_fn

 import logging

@@ -170,12 +171,12 @@ class StreamStore(SQLBaseStore):
         room_ids = list(room_ids)
         for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)):
             res = yield defer.gatherResults([
-                self.get_room_events_stream_for_room(
-                    room_id, from_key, to_key, limit
-                ).addCallback(lambda r, rm: (rm, r), room_id)
+                preserve_fn(self.get_room_events_stream_for_room)(
+                    room_id, from_key, to_key, limit,
+                )
                 for room_id in rm_ids
             ])
-            results.update(dict(res))
+            results.update(dict(zip(rm_ids, res)))

         defer.returnValue(results)

diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index 7566d9eb33..133671e238 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+from synapse.util.logcontext import PreserveLoggingContext

 from twisted.internet import defer, reactor, task

@@ -61,10 +61,8 @@ class Clock(object):
             *args: Positional arguments to pass to function.
             **kwargs: Keyword arguments to pass to function.
         """
-        current_context = LoggingContext.current_context()
-
         def wrapped_callback(*args, **kwargs):
-            with PreserveLoggingContext(current_context):
+            with PreserveLoggingContext():
                 callback(*args, **kwargs)

         with PreserveLoggingContext():
diff --git a/synapse/util/async.py b/synapse/util/async.py
index 200edd404c..640fae3890 100644
--- a/synapse/util/async.py
+++ b/synapse/util/async.py
@@ -16,13 +16,16 @@

 from twisted.internet import defer, reactor

-from .logcontext import preserve_context_over_deferred
+from .logcontext import PreserveLoggingContext


+@defer.inlineCallbacks
 def sleep(seconds):
     d = defer.Deferred()
-    reactor.callLater(seconds, d.callback, seconds)
-    return preserve_context_over_deferred(d)
+    with PreserveLoggingContext():
+        reactor.callLater(seconds, d.callback, seconds)
+        res = yield d
+    defer.returnValue(res)


 def run_on_reactor():
@@ -54,6 +57,7 @@ class ObservableDeferred(object):
             object.__setattr__(self, "_result", (True, r))
             while self._observers:
                 try:
+                    # TODO: Handle errors here.
                     self._observers.pop().callback(r)
                 except:
                     pass
@@ -63,6 +67,7 @@ class ObservableDeferred(object):
             object.__setattr__(self, "_result", (False, f))
             while self._observers:
                 try:
+                    # TODO: Handle errors here.
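                    # (As in the success path above, an exception from one
                    # observer is swallowed so the remaining observers still
                    # receive their errback.)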
self._observers.pop().errback(f) except: pass diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index e27917c63a..277854ccbc 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -18,6 +18,9 @@ from synapse.util.async import ObservableDeferred from synapse.util import unwrapFirstError from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache +from synapse.util.logcontext import ( + PreserveLoggingContext, preserve_context_over_deferred, preserve_context_over_fn +) from . import caches_by_name, DEBUG_CACHES, cache_counter @@ -190,7 +193,7 @@ class CacheDescriptor(object): defer.returnValue(cached_result) observer.addCallback(check_result) - return observer + return preserve_context_over_deferred(observer) except KeyError: # Get the sequence number of the cache before reading from the # database so that we can tell if the cache is invalidated @@ -198,6 +201,7 @@ class CacheDescriptor(object): sequence = self.cache.sequence ret = defer.maybeDeferred( + preserve_context_over_fn, self.function_to_call, obj, *args, **kwargs ) @@ -211,7 +215,7 @@ class CacheDescriptor(object): ret = ObservableDeferred(ret, consumeErrors=True) self.cache.update(sequence, cache_key, ret) - return ret.observe() + return preserve_context_over_deferred(ret.observe()) wrapped.invalidate = self.cache.invalidate wrapped.invalidate_all = self.cache.invalidate_all @@ -299,6 +303,7 @@ class CacheListDescriptor(object): args_to_call[self.list_name] = missing ret_d = defer.maybeDeferred( + preserve_context_over_fn, self.function_to_call, **args_to_call ) @@ -308,7 +313,8 @@ class CacheListDescriptor(object): # We need to create deferreds for each arg in the list so that # we can insert the new deferred into the cache. for arg in missing: - observer = ret_d.observe() + with PreserveLoggingContext(): + observer = ret_d.observe() observer.addCallback(lambda r, arg: r.get(arg, None), arg) observer = ObservableDeferred(observer) @@ -327,10 +333,10 @@ class CacheListDescriptor(object): cached[arg] = res - return defer.gatherResults( + return preserve_context_over_deferred(defer.gatherResults( cached.values(), consumeErrors=True, - ).addErrback(unwrapFirstError).addCallback(lambda res: dict(res)) + ).addErrback(unwrapFirstError).addCallback(lambda res: dict(res))) obj.__dict__[self.orig.__name__] = wrapped diff --git a/synapse/util/caches/snapshot_cache.py b/synapse/util/caches/snapshot_cache.py index b1e40417fd..d03678b8c8 100644 --- a/synapse/util/caches/snapshot_cache.py +++ b/synapse/util/caches/snapshot_cache.py @@ -87,7 +87,8 @@ class SnapshotCache(object): # expire from the rotation of that cache. 
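            # (shuffle_along is chained straight onto the underlying deferred
            # via addBoth below, so it runs before any observers created by
            # observe(); it returns r at the end so the result is passed
            # through to those observers intact.)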
self.next_result_cache[key] = result
             self.pending_result_cache.pop(key, None)
+            return r

-        result.observe().addBoth(shuffle_along)
+        result.addBoth(shuffle_along)

         return result.observe()
diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py
index 4ebfebf701..8875813de4 100644
--- a/synapse/util/distributor.py
+++ b/synapse/util/distributor.py
@@ -15,9 +15,7 @@

 from twisted.internet import defer

-from synapse.util.logcontext import (
-    PreserveLoggingContext, preserve_context_over_deferred,
-)
+from synapse.util.logcontext import PreserveLoggingContext

 from synapse.util import unwrapFirstError

@@ -97,6 +95,7 @@ class Signal(object):
         Each observer callable may return a Deferred."""
         self.observers.append(observer)

+    @defer.inlineCallbacks
     def fire(self, *args, **kwargs):
         """Invokes every callable in the observer list, passing in the args and
         kwargs. Exceptions thrown by observers are logged but ignored. It is
@@ -116,6 +115,7 @@ class Signal(object):
                         failure.getTracebackObject()))
                 if not self.suppress_failures:
                     return failure
+
             return defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb)

         with PreserveLoggingContext():
@@ -124,8 +124,11 @@ class Signal(object):
                 for observer in self.observers
             ]

-            d = defer.gatherResults(deferreds, consumeErrors=True)
+            res = yield defer.gatherResults(
+                deferreds, consumeErrors=True
+            ).addErrback(unwrapFirstError)

-        d.addErrback(unwrapFirstError)
+        defer.returnValue(res)

-        return preserve_context_over_deferred(d)
+    def __repr__(self):
+        return "<Signal name=%r>" % (self.name,)
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
index e701092cd8..9134e67908 100644
--- a/synapse/util/logcontext.py
+++ b/synapse/util/logcontext.py
@@ -48,7 +48,7 @@ class LoggingContext(object):

     __slots__ = [
         "parent_context", "name", "usage_start", "usage_end", "main_thread",
-        "__dict__", "tag",
+        "__dict__", "tag", "alive",
     ]

     thread_local = threading.local()
@@ -88,6 +88,7 @@ class LoggingContext(object):
         self.usage_start = None
         self.main_thread = threading.current_thread()
         self.tag = ""
+        self.alive = True

     def __str__(self):
         return "%s@%x" % (self.name, id(self))
@@ -106,6 +107,7 @@ class LoggingContext(object):
             The context that was previously active
         """
         current = cls.current_context()
+
         if current is not context:
             current.stop()
             cls.thread_local.current_context = context
@@ -117,6 +119,7 @@ class LoggingContext(object):
         if self.parent_context is not None:
             raise Exception("Attempt to enter logging context multiple times")
         self.parent_context = self.set_current_context(self)
+        self.alive = True
         return self

     def __exit__(self, type, value, traceback):
@@ -136,6 +139,7 @@ class LoggingContext(object):
                 self
             )
         self.parent_context = None
+        self.alive = False

     def __getattr__(self, name):
         """Delegate member lookup to parent context"""
@@ -213,7 +217,7 @@ class PreserveLoggingContext(object):
     exited. 
Used to restore the context after a function using @defer.inlineCallbacks is resumed by a callback from the reactor.""" - __slots__ = ["current_context", "new_context"] + __slots__ = ["current_context", "new_context", "has_parent"] def __init__(self, new_context=LoggingContext.sentinel): self.new_context = new_context @@ -224,11 +228,26 @@ class PreserveLoggingContext(object): self.new_context ) + if self.current_context: + self.has_parent = self.current_context.parent_context is not None + if not self.current_context.alive: + logger.warn( + "Entering dead context: %s", + self.current_context, + ) + def __exit__(self, type, value, traceback): """Restores the current logging context""" - LoggingContext.set_current_context(self.current_context) + context = LoggingContext.set_current_context(self.current_context) + + if context != self.new_context: + logger.warn( + "Unexpected logging context: %s is not %s", + context, self.new_context, + ) + if self.current_context is not LoggingContext.sentinel: - if self.current_context.parent_context is None: + if not self.current_context.alive: logger.warn( "Restoring dead context: %s", self.current_context, @@ -289,3 +308,74 @@ def preserve_context_over_deferred(deferred): d = _PreservingContextDeferred(current_context) deferred.chainDeferred(d) return d + + +def preserve_fn(f): + """Ensures that function is called with correct context and that context is + restored after return. Useful for wrapping functions that return a deferred + which you don't yield on. + """ + current = LoggingContext.current_context() + + def g(*args, **kwargs): + with PreserveLoggingContext(current): + return f(*args, **kwargs) + + return g + + +# modules to ignore in `logcontext_tracer` +_to_ignore = [ + "synapse.util.logcontext", + "synapse.http.server", + "synapse.storage._base", + "synapse.util.async", +] + + +def logcontext_tracer(frame, event, arg): + """A tracer that logs whenever a logcontext "unexpectedly" changes within + a function. Probably inaccurate. + + Use by calling `sys.settrace(logcontext_tracer)` in the main thread. + """ + if event == 'call': + name = frame.f_globals["__name__"] + if name.startswith("synapse"): + if name == "synapse.util.logcontext": + if frame.f_code.co_name in ["__enter__", "__exit__"]: + tracer = frame.f_back.f_trace + if tracer: + tracer.just_changed = True + + tracer = frame.f_trace + if tracer: + return tracer + + if not any(name.startswith(ig) for ig in _to_ignore): + return LineTracer() + + +class LineTracer(object): + __slots__ = ["context", "just_changed"] + + def __init__(self): + self.context = LoggingContext.current_context() + self.just_changed = False + + def __call__(self, frame, event, arg): + if event in 'line': + if self.just_changed: + self.context = LoggingContext.current_context() + self.just_changed = False + else: + c = LoggingContext.current_context() + if c != self.context: + logger.info( + "Context changed! 
%s -> %s, %s, %s",
                    self.context, c,
                    frame.f_code.co_filename, frame.f_lineno
                )
                self.context = c

        return self
diff --git a/synapse/util/logutils.py b/synapse/util/logutils.py
index c37a157787..3a83828d25 100644
--- a/synapse/util/logutils.py
+++ b/synapse/util/logutils.py
@@ -168,3 +168,38 @@ def trace_function(f):

     wrapped.__name__ = func_name
     return wrapped
+
+
+def get_previous_frames():
+    s = inspect.currentframe().f_back.f_back
+    to_return = []
+    while s:
+        if s.f_globals["__name__"].startswith("synapse"):
+            filename, lineno, function, _, _ = inspect.getframeinfo(s)
+            args_string = inspect.formatargvalues(*inspect.getargvalues(s))
+
+            to_return.append("{{ %s:%d %s - Args: %s }}" % (
+                filename, lineno, function, args_string
+            ))
+
+        s = s.f_back
+
+    return ", ".join(to_return)
+
+
+def get_previous_frame(ignore=[]):
+    s = inspect.currentframe().f_back.f_back
+
+    while s:
+        if s.f_globals["__name__"].startswith("synapse"):
+            if not any(s.f_globals["__name__"].startswith(ig) for ig in ignore):
+                filename, lineno, function, _, _ = inspect.getframeinfo(s)
+                args_string = inspect.formatargvalues(*inspect.getargvalues(s))
+
+                return "{{ %s:%d %s - Args: %s }}" % (
+                    filename, lineno, function, args_string
+                )
+
+        s = s.f_back
+
+    return None
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index daf6087fe0..ca48007218 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -68,16 +68,18 @@ class Measure(object):
         block_timer.inc_by(duration, self.name)

         context = LoggingContext.current_context()
-        if not context:
-            return

         if context != self.start_context:
             logger.warn(
-                "Context have unexpectedly changed %r, %r",
-                context, self.start_context
+                "Context has unexpectedly changed from '%s' to '%s'. (%r)",
+                context, self.start_context, self.name
             )
             return

+        if not context:
+            logger.warn("Expected context. 
(%r)", self.name) + return + ru_utime, ru_stime = context.get_resource_usage() block_ru_utime.inc_by(ru_utime, self.name) diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index ea321bc6a9..4076eed269 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.api.errors import LimitExceededError from synapse.util.async import sleep +from synapse.util.logcontext import preserve_fn import collections import contextlib @@ -163,7 +164,7 @@ class _PerHostRatelimiter(object): "Ratelimit [%s]: sleeping req", id(request_id), ) - ret_defer = sleep(self.sleep_msec / 1000.0) + ret_defer = preserve_fn(sleep)(self.sleep_msec / 1000.0) self.sleeping_requests.add(request_id) -- cgit 1.4.1 From eff12e838ce10588ca8103c9131dcfe2f2e7950e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 13:55:59 +0000 Subject: Don't load all ephemeral state for a room on every sync --- synapse/handlers/sync.py | 20 ++++++-------------- synapse/storage/receipts.py | 14 ++++++++++++++ 2 files changed, 20 insertions(+), 14 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 446f8bbe93..6a5868f87e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -319,7 +319,6 @@ class SyncHandler(BaseHandler): ephemeral_by_room=ephemeral_by_room, tags_by_room=tags_by_room, account_data_by_room=account_data_by_room, - all_ephemeral_by_room=ephemeral_by_room, batch=batch, full_state=True, ) @@ -453,13 +452,6 @@ class SyncHandler(BaseHandler): ) now_token = now_token.copy_and_replace("presence_key", presence_key) - # We now fetch all ephemeral events for this room in order to get - # this users current read receipt. This could almost certainly be - # optimised. 
- _, all_ephemeral_by_room = yield self.ephemeral_by_room( - sync_config, now_token - ) - now_token, ephemeral_by_room = yield self.ephemeral_by_room( sync_config, now_token, since_token ) @@ -591,7 +583,6 @@ class SyncHandler(BaseHandler): ephemeral_by_room=ephemeral_by_room, tags_by_room=tags_by_room, account_data_by_room=account_data_by_room, - all_ephemeral_by_room=all_ephemeral_by_room, batch=batch, full_state=full_state, ) @@ -691,7 +682,6 @@ class SyncHandler(BaseHandler): since_token, now_token, ephemeral_by_room, tags_by_room, account_data_by_room, - all_ephemeral_by_room, batch, full_state=False): state = yield self.compute_state_delta( room_id, batch, sync_config, since_token, now_token, @@ -722,7 +712,7 @@ class SyncHandler(BaseHandler): if room_sync: notifs = yield self.unread_notifs_for_room_id( - room_id, sync_config, all_ephemeral_by_room + room_id, sync_config ) if notifs is not None: @@ -906,10 +896,12 @@ class SyncHandler(BaseHandler): return False @defer.inlineCallbacks - def unread_notifs_for_room_id(self, room_id, sync_config, ephemeral_by_room): + def unread_notifs_for_room_id(self, room_id, sync_config): with Measure(self.clock, "unread_notifs_for_room_id"): - last_unread_event_id = self.last_read_event_id_for_room_and_user( - room_id, sync_config.user.to_string(), ephemeral_by_room + last_unread_event_id = yield self.store.get_last_receipt_event_id_for_user( + user_id=sync_config.user.to_string(), + room_id=room_id, + receipt_type="m.read" ) notifs = [] diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 8068c73740..1aff9f070e 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -46,6 +46,20 @@ class ReceiptsStore(SQLBaseStore): desc="get_receipts_for_room", ) + @cached(num_args=3) + def get_last_receipt_event_id_for_user(self, user_id, room_id, receipt_type): + return self._simple_select_one_onecol( + table="receipts_linearized", + keyvalues={ + "room_id": room_id, + "receipt_type": receipt_type, + "user_id": user_id + }, + retcol="event_id", + desc="get_own_receipt_for_user", + allow_none=True, + ) + @cachedInlineCallbacks(num_args=2) def get_receipts_for_user(self, user_id, receipt_type): def f(txn): -- cgit 1.4.1 From 70a8608749e0c1ec7a993a9effc424303af24738 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 14:27:29 +0000 Subject: Invalidate get_last_receipt_event_id_for_user cache --- synapse/storage/receipts.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 1aff9f070e..4202a6b3dc 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -240,6 +240,11 @@ class ReceiptsStore(SQLBaseStore): room_id, stream_id ) + txn.call_after( + self.get_last_receipt_event_id_for_user.invalidate, + (user_id, room_id, receipt_type) + ) + # We don't want to clobber receipts for more recent events, so we # have to compare orderings of existing receipts sql = ( -- cgit 1.4.1 From 78d6c1b5bec800671d3ff66acecb2f8bbdf41aa1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 14:44:12 +0000 Subject: Change a log from debug to info --- synapse/storage/prepare_database.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index d782b8e25b..850736c85e 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -211,7 +211,7 @@ def 
_upgrade_existing_database(cur, current_version, applied_delta_files,
     logger.debug("applied_delta_files: %s", applied_delta_files)

     for v in range(start_ver, SCHEMA_VERSION + 1):
-        logger.debug("Upgrading schema to v%d", v)
+        logger.info("Upgrading schema to v%d", v)

         delta_dir = os.path.join(dir_path, "schema", "delta", str(v))
-- 
cgit 1.4.1


From 7b0d846407a593ccd204f82aaa1090b8af8df84c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 9 Feb 2016 16:19:15 +0000
Subject: Atomically persist push actions when we persist the event

---
 synapse/events/snapshot.py            |  1 +
 synapse/handlers/_base.py             | 10 ++++----
 synapse/handlers/federation.py        | 12 +++++-----
 synapse/push/action_generator.py      | 20 ++++------------
 synapse/storage/event_push_actions.py | 45 +++++++++++++----------------------
 synapse/storage/events.py             | 26 ++++++++++++--------
 6 files changed, 49 insertions(+), 65 deletions(-)

(limited to 'synapse/storage')

diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index f51200d18e..8a475417a6 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -20,3 +20,4 @@ class EventContext(object):
         self.current_state = current_state
         self.state_group = None
         self.rejected = False
+        self.push_actions = []
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index d3f722b22e..064e8723c8 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -264,13 +264,13 @@ class BaseHandler(object):
                 "You don't have permission to redact events"
             )

-        (event_stream_id, max_stream_id) = yield self.store.persist_event(
-            event, context=context
-        )
-
         action_generator = ActionGenerator(self.hs)
         yield action_generator.handle_push_actions_for_event(
-            event, self, context.current_state
+            event, context, self
+        )
+
+        (event_stream_id, max_stream_id) = yield self.store.persist_event(
+            event, context=context
         )

         destinations = set()
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index b78b0502d9..da55d43541 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -236,12 +236,6 @@ class FederationHandler(BaseHandler):
             user = UserID.from_string(event.state_key)
             yield user_joined_room(self.distributor, user, event.room_id)

-        if not backfilled and not event.internal_metadata.is_outlier():
-            action_generator = ActionGenerator(self.hs)
-            yield action_generator.handle_push_actions_for_event(
-                event, self
-            )
-
     @defer.inlineCallbacks
     def _filter_events_for_server(self, server_name, room_id, events):
         event_to_state = yield self.store.get_state_for_events(
@@ -1073,6 +1067,12 @@ class FederationHandler(BaseHandler):
             auth_events=auth_events,
         )

+        if not backfilled and not event.internal_metadata.is_outlier():
+            action_generator = ActionGenerator(self.hs)
+            yield action_generator.handle_push_actions_for_event(
+                event, context, self
+            )
+
         event_stream_id, max_stream_id = yield self.store.persist_event(
             event,
             context=context,
diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py
index d8f8256a1f..e0da0868ec 100644
--- a/synapse/push/action_generator.py
+++ b/synapse/push/action_generator.py
@@ -19,8 +19,6 @@ import bulk_push_rule_evaluator

 import logging

-from synapse.api.constants import EventTypes
-
 logger = logging.getLogger(__name__)

@@ -36,23 +34,15 @@ class ActionGenerator:
     # tag (ie. we just need all the users).
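    # (Note the new flow below: the generator no longer writes to the store
    # itself; the computed actions are stashed on the EventContext so that
    # persist_event can write the event and its push actions in a single
    # transaction.)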
@defer.inlineCallbacks - def handle_push_actions_for_event(self, event, handler, current_state): - if event.type == EventTypes.Redaction and event.redacts is not None: - yield self.store.remove_push_actions_for_event_id( - event.room_id, event.redacts - ) - + def handle_push_actions_for_event(self, event, context, handler): bulk_evaluator = yield bulk_push_rule_evaluator.evaluator_for_room_id( event.room_id, self.hs, self.store ) actions_by_user = yield bulk_evaluator.action_for_event_by_user( - event, handler, current_state + event, handler, context.current_state ) - yield self.store.set_push_actions_for_event_and_users( - event, - [ - (uid, None, actions) for uid, actions in actions_by_user.items() - ] - ) + context.push_actions = [ + (uid, None, actions) for uid, actions in actions_by_user.items() + ] diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index d0a969f50b..466f07a1c4 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -24,8 +24,7 @@ logger = logging.getLogger(__name__) class EventPushActionsStore(SQLBaseStore): - @defer.inlineCallbacks - def set_push_actions_for_event_and_users(self, event, tuples): + def _set_push_actions_for_event_and_users(self, txn, event, tuples): """ :param event: the event set actions for :param tuples: list of tuples of (user_id, profile_tag, actions) @@ -44,18 +43,12 @@ class EventPushActionsStore(SQLBaseStore): 'highlight': 1 if _action_has_highlight(actions) else 0, }) - def f(txn): - for uid, _, __ in tuples: - txn.call_after( - self.get_unread_event_push_actions_by_room_for_user.invalidate_many, - (event.room_id, uid) - ) - return self._simple_insert_many_txn(txn, "event_push_actions", values) - - yield self.runInteraction( - "set_actions_for_event_and_users", - f, - ) + for uid, _, __ in tuples: + txn.call_after( + self.get_unread_event_push_actions_by_room_for_user.invalidate_many, + (event.room_id, uid) + ) + self._simple_insert_many_txn(txn, "event_push_actions", values) @cachedInlineCallbacks(num_args=3, lru=True, tree=True) def get_unread_event_push_actions_by_room_for_user( @@ -107,21 +100,15 @@ class EventPushActionsStore(SQLBaseStore): ) defer.returnValue(ret) - @defer.inlineCallbacks - def remove_push_actions_for_event_id(self, room_id, event_id): - def f(txn): - # Sad that we have to blow away the cache for the whole room here - txn.call_after( - self.get_unread_event_push_actions_by_room_for_user.invalidate_many, - (room_id,) - ) - txn.execute( - "DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?", - (room_id, event_id) - ) - yield self.runInteraction( - "remove_push_actions_for_event_id", - f + def _remove_push_actions_for_event_id(self, txn, room_id, event_id): + # Sad that we have to blow away the cache for the whole room here + txn.call_after( + self.get_unread_event_push_actions_by_room_for_user.invalidate_many, + (room_id,) + ) + txn.execute( + "DELETE FROM event_push_actions WHERE room_id = ? 
AND event_id = ?", + (room_id, event_id) ) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index c6ed54721c..7d4012c414 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -205,23 +205,29 @@ class EventsStore(SQLBaseStore): @log_function def _persist_events_txn(self, txn, events_and_contexts, backfilled, is_new_state=True): - - # Remove the any existing cache entries for the event_ids - for event, _ in events_and_contexts: + depth_updates = {} + for event, context in events_and_contexts: + # Remove the any existing cache entries for the event_ids txn.call_after(self._invalidate_get_event_cache, event.event_id) - if not backfilled: txn.call_after( self._events_stream_cache.entity_has_changed, event.room_id, event.internal_metadata.stream_ordering, ) - depth_updates = {} - for event, _ in events_and_contexts: - if event.internal_metadata.is_outlier(): - continue - depth_updates[event.room_id] = max( - event.depth, depth_updates.get(event.room_id, event.depth) + if not event.internal_metadata.is_outlier(): + depth_updates[event.room_id] = max( + event.depth, depth_updates.get(event.room_id, event.depth) + ) + + if context.push_actions: + self._set_push_actions_for_event_and_users( + txn, event, context.push_actions + ) + + if event.type == EventTypes.Redaction and event.redacts is not None: + self._remove_push_actions_for_event_id( + txn, event.room_id, event.redacts ) for room_id, depth in depth_updates.items(): -- cgit 1.4.1 From 02147452396c67e7874b201460f8b1cc8996a90a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Feb 2016 11:09:56 +0000 Subject: Rename functions --- synapse/storage/event_push_actions.py | 4 ++-- synapse/storage/events.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 466f07a1c4..d77a817682 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -24,7 +24,7 @@ logger = logging.getLogger(__name__) class EventPushActionsStore(SQLBaseStore): - def _set_push_actions_for_event_and_users(self, txn, event, tuples): + def _set_push_actions_for_event_and_users_txn(self, txn, event, tuples): """ :param event: the event set actions for :param tuples: list of tuples of (user_id, profile_tag, actions) @@ -100,7 +100,7 @@ class EventPushActionsStore(SQLBaseStore): ) defer.returnValue(ret) - def _remove_push_actions_for_event_id(self, txn, room_id, event_id): + def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id): # Sad that we have to blow away the cache for the whole room here txn.call_after( self.get_unread_event_push_actions_by_room_for_user.invalidate_many, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 7d4012c414..3a5c6ee4b1 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -221,12 +221,12 @@ class EventsStore(SQLBaseStore): ) if context.push_actions: - self._set_push_actions_for_event_and_users( + self._set_push_actions_for_event_and_users_txn( txn, event, context.push_actions ) if event.type == EventTypes.Redaction and event.redacts is not None: - self._remove_push_actions_for_event_id( + self._remove_push_actions_for_event_id_txn( txn, event.room_id, event.redacts ) -- cgit 1.4.1 From 24f00a6c33900cf701330ff324b0479c1898d5ce Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Feb 2016 12:57:50 +0000 Subject: Use _simple_select_many for _get_state_group_for_events --- 
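Note on the change below: it replaces a per-event `_simple_select_one_onecol_txn`
loop (one query per event id) with a single batched `IN` query via
`_simple_select_many_batch`. A minimal sketch of the batched shape, reusing only
the signature visible in the diff; the `state_groups_for` helper name is
illustrative, not part of the patch:

    @defer.inlineCallbacks
    def state_groups_for(store, event_ids):
        # One round trip with WHERE event_id IN (?, ..., ?) rather than one
        # SELECT per event id.
        rows = yield store._simple_select_many_batch(
            table="event_to_state_groups",
            column="event_id",
            iterable=event_ids,
            keyvalues={},
            retcols=("event_id", "state_group",),
            desc="state_groups_for",
        )
        defer.returnValue({row["event_id"]: row["state_group"] for row in rows})
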
synapse/handlers/sync.py | 2 +- synapse/storage/state.py | 26 ++++++++++---------------- 2 files changed, 11 insertions(+), 17 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 84f29e3867..1d0f0058a2 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -18,7 +18,7 @@ from ._base import BaseHandler from synapse.streams.config import PaginationConfig from synapse.api.constants import Membership, EventTypes from synapse.util import unwrapFirstError -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.metrics import Measure from twisted.internet import defer diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 6c32e8f7b3..90ec50bb50 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -264,26 +264,20 @@ class StateStore(SQLBaseStore): ) @cachedList(cache=_get_state_group_for_event.cache, list_name="event_ids", - num_args=1) + num_args=1, inlineCallbacks=True) def _get_state_group_for_events(self, event_ids): """Returns mapping event_id -> state_group """ - def f(txn): - results = {} - for event_id in event_ids: - results[event_id] = self._simple_select_one_onecol_txn( - txn, - table="event_to_state_groups", - keyvalues={ - "event_id": event_id, - }, - retcol="state_group", - allow_none=True, - ) - - return results + rows = yield self._simple_select_many_batch( + table="event_to_state_groups", + column="event_id", + iterable=event_ids, + keyvalues={}, + retcols=("event_id", "state_group",), + desc="_get_state_group_for_events", + ) - return self.runInteraction("_get_state_group_for_events", f) + defer.returnValue({row["event_id"]: row["state_group"] for row in rows}) def _get_some_state_from_cache(self, group, types): """Checks if group is in cache. See `_get_state_for_groups` -- cgit 1.4.1 From 5189bfdef4c87a7b0527de603eae52ac27bd500c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Feb 2016 13:24:42 +0000 Subject: Batch fetch _get_state_groups_from_groups --- synapse/storage/state.py | 66 +++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 32 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 90ec50bb50..372b540002 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -171,41 +171,43 @@ class StateStore(SQLBaseStore): events = yield self._get_events(event_ids, get_prev_content=False) defer.returnValue(events) - def _get_state_groups_from_groups(self, groups_and_types): + def _get_state_groups_from_groups(self, groups, types): """Returns dictionary state_group -> state event ids - - Args: - groups_and_types (list): list of 2-tuple (`group`, `types`) """ - def f(txn): - results = {} - for group, types in groups_and_types: - if types is not None: - where_clause = "AND (%s)" % ( - " OR ".join(["(type = ? AND state_key = ?)"] * len(types)), - ) - else: - where_clause = "" - - sql = ( - "SELECT event_id FROM state_groups_state WHERE" - " state_group = ? %s" - ) % (where_clause,) + def f(txn, groups): + if types is not None: + where_clause = "AND (%s)" % ( + " OR ".join(["(type = ? 
AND state_key = ?)"] * len(types)),
+                )
+            else:
+                where_clause = ""
+
+            sql = (
+                "SELECT state_group, event_id FROM state_groups_state WHERE"
+                " state_group IN (%s) %s" % (
+                    ",".join("?" for _ in groups),
+                    where_clause,
+                )
+            )
+
+            args = list(groups)
+            if types is not None:
+                args.extend([i for typ in types for i in typ])
+
+            txn.execute(sql, args)
+            rows = self.cursor_to_dict(txn)
+
+            results = {}
+            for row in rows:
+                results.setdefault(row["state_group"], []).append(row["event_id"])
+            return results

-        return self.runInteraction(
-            "_get_state_groups_from_groups",
-            f,
-        )
+        chunks = [groups[i:i + 100] for i in xrange(0, len(groups), 100)]
+
+        def merge(res_list):
+            # Combine the per-chunk state_group -> event ids mappings into a
+            # single dict, so every chunk's results are returned.
+            results = {}
+            for res in res_list:
+                results.update(res)
+            return results
+
+        # Query each chunk of at most 100 groups in its own interaction, to
+        # stay under bound-parameter limits, then merge the results.
+        return defer.gatherResults([
+            self.runInteraction("_get_state_groups_from_groups", f, chunk)
+            for chunk in chunks
+        ]).addCallback(merge)

     @defer.inlineCallbacks
     def get_state_for_events(self, event_ids, types):
@@ -349,7 +351,7 @@ class StateStore(SQLBaseStore):
            all events are returned.
         """
         results = {}
-        missing_groups_and_types = []
+        missing_groups = []
         if types is not None:
             for group in set(groups):
                 state_dict, missing_types, got_all = self._get_some_state_from_cache(
@@ -358,7 +360,7 @@ class StateStore(SQLBaseStore):
                 results[group] = state_dict

                 if not got_all:
-                    missing_groups_and_types.append((group, missing_types))
+                    missing_groups.append(group)
         else:
             for group in set(groups):
                 state_dict, got_all = self._get_all_state_from_cache(
@@ -367,9 +369,9 @@ class StateStore(SQLBaseStore):
                 results[group] = state_dict

                 if not got_all:
-                    missing_groups_and_types.append((group, None))
+                    missing_groups.append(group)

-        if not missing_groups_and_types:
+        if not missing_groups:
             defer.returnValue({
                 group: {
                     type_tuple: event
@@ -383,7 +385,7 @@ class StateStore(SQLBaseStore):
         cache_seq_num = self._state_group_cache.sequence

         group_state_dict = yield self._get_state_groups_from_groups(
-            missing_groups_and_types
+            missing_groups, types
         )

         state_events = yield self._get_events(
-- 
cgit 1.4.1
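A closing note on the pattern running through the logcontext commits above: any
deferred-returning call that is deliberately not yielded on has to be wrapped,
so the caller's logging context neither leaks into the reactor nor is lost when
the deferred later fires. A minimal usage sketch; `background_cleanup` and
`fetch_thing` are illustrative names, not functions from these patches:

    from twisted.internet import defer

    from synapse.util.logcontext import LoggingContext, preserve_fn


    @defer.inlineCallbacks
    def handle_request(store, request_id):
        with LoggingContext(request_id) as context:
            context.tag = "example"

            # Fire-and-forget: preserve_fn captures the current context,
            # runs the function inside it, and restores it on return, so
            # log lines from the background work stay attributed to this
            # request (cf. pusherpool's preserve_fn(p.start)()).
            preserve_fn(store.background_cleanup)()

            # Deferreds we do wait on can simply be yielded; the storage
            # layer's runInteraction wrappers shown above hand the context
            # back when they resume us.
            result = yield store.fetch_thing(request_id)
            defer.returnValue(result)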