From 74c38797601f6d7d1a02d21fc54ceb1a54629c64 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 19 Nov 2014 18:20:59 +0000 Subject: Start creating a module to do generic notifications (just prints them to stdout currently!) --- synapse/storage/__init__.py | 6 ++- synapse/storage/pusher.py | 98 +++++++++++++++++++++++++++++++++++++ synapse/storage/schema/delta/v7.sql | 28 +++++++++++ synapse/storage/schema/pusher.sql | 28 +++++++++++ 4 files changed, 158 insertions(+), 2 deletions(-) create mode 100644 synapse/storage/pusher.py create mode 100644 synapse/storage/schema/delta/v7.sql create mode 100644 synapse/storage/schema/pusher.sql (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index c36d938d96..5957f938a4 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -33,6 +33,7 @@ from .stream import StreamStore from .transactions import TransactionStore from .keys import KeyStore from .event_federation import EventFederationStore +from .pusher import PusherStore from .state import StateStore from .signatures import SignatureStore @@ -62,12 +63,13 @@ SCHEMAS = [ "state", "event_edges", "event_signatures", + "pusher" ] # Remember to update this number every time an incompatible change is made to # database schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 6 +SCHEMA_VERSION = 7 class _RollbackButIsFineException(Exception): @@ -81,7 +83,7 @@ class DataStore(RoomMemberStore, RoomStore, RegistrationStore, StreamStore, ProfileStore, FeedbackStore, PresenceStore, TransactionStore, DirectoryStore, KeyStore, StateStore, SignatureStore, - EventFederationStore, ): + EventFederationStore, PusherStore, ): def __init__(self, hs): super(DataStore, self).__init__(hs) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py new file mode 100644 index 0000000000..047a5f42d9 --- /dev/null +++ b/synapse/storage/pusher.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +# Copyright 2014 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections + +from ._base import SQLBaseStore, Table +from twisted.internet import defer + +from sqlite3 import IntegrityError +from synapse.api.errors import StoreError + +import logging + +logger = logging.getLogger(__name__) + +class PusherStore(SQLBaseStore): + @defer.inlineCallbacks + def get_all_pushers_after_id(self, min_id): + sql = ( + "SELECT id, user_name, kind, app, app_display_name, device_display_name, pushkey, data, last_token " + "FROM pushers " + "WHERE id > ?" 
+ ) + + rows = yield self._execute(None, sql, min_id) + + ret = [ + { + "id": r[0], + "user_name": r[1], + "kind": r[2], + "app": r[3], + "app_display_name": r[4], + "device_display_name": r[5], + "pushkey": r[6], + "data": r[7], + "last_token": r[8] + + } + for r in rows + ] + + defer.returnValue(ret) + + @defer.inlineCallbacks + def add_pusher(self, user_name, kind, app, app_display_name, device_display_name, pushkey, data): + try: + yield self._simple_insert(PushersTable.table_name, dict( + user_name=user_name, + kind=kind, + app=app, + app_display_name=app_display_name, + device_display_name=device_display_name, + pushkey=pushkey, + data=data + )) + except IntegrityError: + raise StoreError(409, "Pushkey in use.") + except Exception as e: + logger.error("create_pusher with failed: %s", e) + raise StoreError(500, "Problem creating pusher.") + + @defer.inlineCallbacks + def update_pusher_last_token(self, user_name, pushkey, last_token): + yield self._simple_update_one(PushersTable.table_name, + {'user_name': user_name, 'pushkey': pushkey}, + {'last_token': last_token} + ) + + +class PushersTable(Table): + table_name = "pushers" + + fields = [ + "id", + "user_name", + "kind", + "app" + "app_display_name", + "device_display_name", + "pushkey", + "data", + "last_token" + ] + + EntryType = collections.namedtuple("PusherEntry", fields) \ No newline at end of file diff --git a/synapse/storage/schema/delta/v7.sql b/synapse/storage/schema/delta/v7.sql new file mode 100644 index 0000000000..7f6852485d --- /dev/null +++ b/synapse/storage/schema/delta/v7.sql @@ -0,0 +1,28 @@ +/* Copyright 2014 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +-- Push notification endpoints that users have configured +CREATE TABLE IF NOT EXISTS pushers ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + kind varchar(8) NOT NULL, + app varchar(64) NOT NULL, + app_display_name varchar(64) NOT NULL, + device_display_name varchar(128) NOT NULL, + pushkey blob NOT NULL, + data text, + last_token TEXT, + FOREIGN KEY(user_name) REFERENCES users(name), + UNIQUE (user_name, pushkey) +); diff --git a/synapse/storage/schema/pusher.sql b/synapse/storage/schema/pusher.sql new file mode 100644 index 0000000000..7f6852485d --- /dev/null +++ b/synapse/storage/schema/pusher.sql @@ -0,0 +1,28 @@ +/* Copyright 2014 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +-- Push notification endpoints that users have configured +CREATE TABLE IF NOT EXISTS pushers ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + kind varchar(8) NOT NULL, + app varchar(64) NOT NULL, + app_display_name varchar(64) NOT NULL, + device_display_name varchar(128) NOT NULL, + pushkey blob NOT NULL, + data text, + last_token TEXT, + FOREIGN KEY(user_name) REFERENCES users(name), + UNIQUE (user_name, pushkey) +); -- cgit 1.5.1 From eb6aedf92c0fe467fd4724623262907ad78573bb Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 21 Nov 2014 12:21:00 +0000 Subject: More work on pushers. Attempt to do HTTP pokes. Not sure if the actual HTTP pokes work or not yet but the retry semantics are pretty good. --- synapse/http/client.py | 19 ++++++++++++ synapse/push/__init__.py | 58 ++++++++++++++++++++++++++++++------- synapse/push/httppusher.py | 55 ++++++++++++++++++++++++++++++++--- synapse/push/pusherpool.py | 8 +++-- synapse/storage/pusher.py | 26 ++++++++++++++--- synapse/storage/schema/delta/v7.sql | 2 ++ synapse/storage/schema/pusher.sql | 2 ++ 7 files changed, 150 insertions(+), 20 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/http/client.py b/synapse/http/client.py index 048a428905..82e80385ce 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -60,6 +60,25 @@ class SimpleHttpClient(object): defer.returnValue(json.loads(body)) + @defer.inlineCallbacks + def post_json_get_json(self, uri, post_json): + json_str = json.dumps(post_json) + + logger.info("HTTP POST %s -> %s", json_str, uri) + + response = yield self.agent.request( + "POST", + uri.encode("ascii"), + headers=Headers({ + "Content-Type": ["application/json"] + }), + bodyProducer=FileBodyProducer(StringIO(json_str)) + ) + + body = yield readBody(response) + + defer.returnValue(json.loads(body)) + @defer.inlineCallbacks def get_json(self, uri, args={}): """ Get's some json from the given host and path diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index df0b91a8e9..a96f0f0183 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -26,12 +26,15 @@ logger = logging.getLogger(__name__) class Pusher(object): INITIAL_BACKOFF = 1000 - MAX_BACKOFF = 10 * 60 * 1000 + MAX_BACKOFF = 60 * 60 * 1000 + GIVE_UP_AFTER = 24 * 60 * 60 * 1000 - def __init__(self, _hs, user_name, app, app_display_name, device_display_name, pushkey, data, last_token): + def __init__(self, _hs, user_name, app, app_display_name, device_display_name, pushkey, data, + last_token, last_success, failing_since): self.hs = _hs self.evStreamHandler = self.hs.get_handlers().event_stream_handler self.store = self.hs.get_datastore() + self.clock = self.hs.get_clock() self.user_name = user_name self.app = app self.app_display_name = app_display_name @@ -40,6 +43,7 @@ class Pusher(object): self.data = data self.last_token = last_token self.backoff_delay = Pusher.INITIAL_BACKOFF + self.failing_since = None @defer.inlineCallbacks def start(self): @@ -58,17 +62,51 @@ class Pusher(object): config = PaginationConfig(from_token=from_tok, limit='1') chunk = yield self.evStreamHandler.get_stream(self.user_name, config, timeout=100*365*24*60*60*1000) - if (self.dispatchPush(chunk['chunk'][0])): + # limiting to 1 may get 1 event plus 1 presence event, so pick out the actual event + singleEvent = None + for c in chunk['chunk']: + if 'event_id' in c: # Hmmm... 
+ singleEvent = c + break + if not singleEvent: + continue + + ret = yield self.dispatchPush(singleEvent) + if (ret): self.backoff_delay = Pusher.INITIAL_BACKOFF self.last_token = chunk['end'] - self.store.update_pusher_last_token(self.user_name, self.pushkey, self.last_token) + self.store.update_pusher_last_token_and_success(self.user_name, self.pushkey, + self.last_token, self.clock.time_msec()) + if self.failing_since: + self.failing_since = None + self.store.update_pusher_failing_since(self.user_name, self.pushkey, self.failing_since) else: - logger.warn("Failed to dispatch push for user %s. Trying again in %dms", - self.user_name, self.backoff_delay) - yield synapse.util.async.sleep(self.backoff_delay / 1000.0) - self.backoff_delay *=2 - if self.backoff_delay > Pusher.MAX_BACKOFF: - self.backoff_delay = Pusher.MAX_BACKOFF + if not self.failing_since: + self.failing_since = self.clock.time_msec() + self.store.update_pusher_failing_since(self.user_name, self.pushkey, self.failing_since) + + if self.failing_since and self.failing_since < self.clock.time_msec() - Pusher.GIVE_UP_AFTER: + # we really only give up so that if the URL gets fixed, we don't suddenly deliver a load + # of old notifications. + logger.warn("Giving up on a notification to user %s, pushkey %s", + self.user_name, self.pushkey) + self.backoff_delay = Pusher.INITIAL_BACKOFF + self.last_token = chunk['end'] + self.store.update_pusher_last_token(self.user_name, self.pushkey, self.last_token) + + self.failing_since = None + self.store.update_pusher_failing_since(self.user_name, self.pushkey, self.failing_since) + else: + logger.warn("Failed to dispatch push for user %s (failing for %dms)." + "Trying again in %dms", + self.user_name, + self.clock.time_msec() - self.failing_since, + self.backoff_delay + ) + yield synapse.util.async.sleep(self.backoff_delay / 1000.0) + self.backoff_delay *=2 + if self.backoff_delay > Pusher.MAX_BACKOFF: + self.backoff_delay = Pusher.MAX_BACKOFF class PusherConfigException(Exception): diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index f3c3ca8191..33d735b974 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -14,13 +14,17 @@ # limitations under the License. from synapse.push import Pusher, PusherConfigException +from synapse.http.client import SimpleHttpClient + +from twisted.internet import defer import logging logger = logging.getLogger(__name__) class HttpPusher(Pusher): - def __init__(self, _hs, user_name, app, app_display_name, device_display_name, pushkey, data, last_token): + def __init__(self, _hs, user_name, app, app_display_name, device_display_name, pushkey, data, + last_token, last_success, failing_since): super(HttpPusher, self).__init__(_hs, user_name, app, @@ -28,12 +32,55 @@ class HttpPusher(Pusher): device_display_name, pushkey, data, - last_token) + last_token, + last_success, + failing_since) if 'url' not in data: raise PusherConfigException("'url' required in data for HTTP pusher") self.url = data['url'] + self.httpCli = SimpleHttpClient(self.hs) + self.data_minus_url = {} + self.data_minus_url.update(self.data) + del self.data_minus_url['url'] + + def _build_notification_dict(self, event): + # we probably do not want to push for every presence update + # (we may want to be able to set up notifications when specific + # people sign in, but we'd want to only deliver the pertinent ones) + # Actually, presence events will not get this far now because we + # need to filter them out in the main Pusher code. 
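        # For illustration only (hypothetical payloads): the stream can
        # interleave both shapes, and only the first kind carries the
        # 'event_id' tested for just below:
        #   {'event_id': '$abc:example.org', 'type': 'm.room.message', ...}
        #   {'type': 'm.presence', 'content': {...}}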
+ if 'event_id' not in event: + return None + + return { + 'notification': { + 'transition' : 'new', # everything is new for now: we don't have read receipts + 'id': event['event_id'], + 'type': event['type'], + 'from': event['user_id'], + # we may have to fetch this over federation and we can't trust it anyway: is it worth it? + #'fromDisplayName': 'Steve Stevington' + }, + #'counts': { -- we don't mark messages as read yet so we have no way of knowing + # 'unread': 1, + # 'missedCalls': 2 + # }, + 'devices': { + self.pushkey: { + 'data' : self.data_minus_url + } + } + } + @defer.inlineCallbacks def dispatchPush(self, event): - print event - return True + notificationDict = self._build_notification_dict(event) + if not notificationDict: + defer.returnValue(True) + try: + yield self.httpCli.post_json_get_json(self.url, notificationDict) + except: + logger.exception("Failed to push %s ", self.url) + defer.returnValue(False) + defer.returnValue(True) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 436040f123..3fa5a4c4ff 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -45,7 +45,9 @@ class PusherPool: "device_display_name": device_display_name, "pushkey": pushkey, "data": data, - "last_token": None + "last_token": None, + "last_success": None, + "failing_since": None }) self._add_pusher_to_store(user_name, kind, app, app_display_name, device_display_name, pushkey, data) @@ -69,7 +71,9 @@ class PusherPool: device_display_name=pusherdict['device_display_name'], pushkey=pusherdict['pushkey'], data=pusherdict['data'], - last_token=pusherdict['last_token'] + last_token=pusherdict['last_token'], + last_success=pusherdict['last_success'], + failing_since=pusherdict['failing_since'] ) else: raise PusherConfigException("Unknown pusher type '%s' for user %s" % diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 047a5f42d9..ce158c4b18 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -29,7 +29,8 @@ class PusherStore(SQLBaseStore): @defer.inlineCallbacks def get_all_pushers_after_id(self, min_id): sql = ( - "SELECT id, user_name, kind, app, app_display_name, device_display_name, pushkey, data, last_token " + "SELECT id, user_name, kind, app, app_display_name, device_display_name, pushkey, data, " + "last_token, last_success, failing_since " "FROM pushers " "WHERE id > ?" 
) @@ -46,8 +47,9 @@ class PusherStore(SQLBaseStore): "device_display_name": r[5], "pushkey": r[6], "data": r[7], - "last_token": r[8] - + "last_token": r[8], + "last_success": r[9], + "failing_since": r[10] } for r in rows ] @@ -79,6 +81,20 @@ class PusherStore(SQLBaseStore): {'last_token': last_token} ) + @defer.inlineCallbacks + def update_pusher_last_token_and_success(self, user_name, pushkey, last_token, last_success): + yield self._simple_update_one(PushersTable.table_name, + {'user_name': user_name, 'pushkey': pushkey}, + {'last_token': last_token, 'last_success': last_success} + ) + + @defer.inlineCallbacks + def update_pusher_failing_since(self, user_name, pushkey, failing_since): + yield self._simple_update_one(PushersTable.table_name, + {'user_name': user_name, 'pushkey': pushkey}, + {'failing_since': failing_since} + ) + class PushersTable(Table): table_name = "pushers" @@ -92,7 +108,9 @@ class PushersTable(Table): "device_display_name", "pushkey", "data", - "last_token" + "last_token", + "last_success", + "failing_since" ] EntryType = collections.namedtuple("PusherEntry", fields) \ No newline at end of file diff --git a/synapse/storage/schema/delta/v7.sql b/synapse/storage/schema/delta/v7.sql index 7f6852485d..e83f7e7436 100644 --- a/synapse/storage/schema/delta/v7.sql +++ b/synapse/storage/schema/delta/v7.sql @@ -23,6 +23,8 @@ CREATE TABLE IF NOT EXISTS pushers ( pushkey blob NOT NULL, data text, last_token TEXT, + last_success BIGINT, + failing_since BIGINT, FOREIGN KEY(user_name) REFERENCES users(name), UNIQUE (user_name, pushkey) ); diff --git a/synapse/storage/schema/pusher.sql b/synapse/storage/schema/pusher.sql index 7f6852485d..e83f7e7436 100644 --- a/synapse/storage/schema/pusher.sql +++ b/synapse/storage/schema/pusher.sql @@ -23,6 +23,8 @@ CREATE TABLE IF NOT EXISTS pushers ( pushkey blob NOT NULL, data text, last_token TEXT, + last_success BIGINT, + failing_since BIGINT, FOREIGN KEY(user_name) REFERENCES users(name), UNIQUE (user_name, pushkey) ); -- cgit 1.5.1 From 88af58d41d561f1d9f6bbbfb2a1e8bd00dbbe638 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 3 Dec 2014 13:37:02 +0000 Subject: Update to app_id / app_instance_id (partially) and mangle to be PEP8 compliant. 
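
As a rough illustration of the client-facing shape after this change, creating
a pusher now supplies an 'app_id' and 'app_instance_id' in place of the old
single 'app' field. A hypothetical request body (the field names come from the
servlet's required-parameter list below; every value here is invented):

    {
        "kind": "http",
        "app_id": "com.example.alerts",
        "app_instance_id": "instance-0001",
        "app_display_name": "Example Alerts",
        "device_display_name": "Alice's Phone",
        "data": {"url": "https://push.example.com/notify"}
    }

If any of these fields is missing, PusherRestServlet rejects the request with
a 400 and Codes.MISSING_PARAM, as the diff below shows.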
--- synapse/push/__init__.py | 97 +++++++++++++++++++++++++------------ synapse/push/httppusher.py | 75 +++++++++++++++------------- synapse/push/pusherpool.py | 75 +++++++++++++++++----------- synapse/rest/pusher.py | 32 +++++++----- synapse/storage/pusher.py | 54 ++++++++++++--------- synapse/storage/schema/delta/v7.sql | 5 +- synapse/storage/schema/pusher.sql | 5 +- 7 files changed, 213 insertions(+), 130 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index a96f0f0183..5fca3bd772 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -24,90 +24,127 @@ import logging logger = logging.getLogger(__name__) + class Pusher(object): INITIAL_BACKOFF = 1000 MAX_BACKOFF = 60 * 60 * 1000 GIVE_UP_AFTER = 24 * 60 * 60 * 1000 - def __init__(self, _hs, user_name, app, app_display_name, device_display_name, pushkey, data, + def __init__(self, _hs, user_name, app_id, app_instance_id, + app_display_name, device_display_name, pushkey, data, last_token, last_success, failing_since): self.hs = _hs self.evStreamHandler = self.hs.get_handlers().event_stream_handler self.store = self.hs.get_datastore() self.clock = self.hs.get_clock() self.user_name = user_name - self.app = app + self.app_id = app_id + self.app_instance_id = app_instance_id self.app_display_name = app_display_name self.device_display_name = device_display_name self.pushkey = pushkey self.data = data self.last_token = last_token + self.last_success = last_success # not actually used self.backoff_delay = Pusher.INITIAL_BACKOFF - self.failing_since = None + self.failing_since = failing_since @defer.inlineCallbacks def start(self): if not self.last_token: - # First-time setup: get a token to start from (we can't just start from no token, ie. 'now' - # because we need the result to be reproduceable in case we fail to dispatch the push) + # First-time setup: get a token to start from (we can't + # just start from no token, ie. 'now' + # because we need the result to be reproduceable in case + # we fail to dispatch the push) config = PaginationConfig(from_token=None, limit='1') - chunk = yield self.evStreamHandler.get_stream(self.user_name, config, timeout=0) + chunk = yield self.evStreamHandler.get_stream( + self.user_name, config, timeout=0) self.last_token = chunk['end'] - self.store.update_pusher_last_token(self.user_name, self.pushkey, self.last_token) + self.store.update_pusher_last_token( + self.user_name, self.pushkey, self.last_token) logger.info("Pusher %s for user %s starting from token %s", self.pushkey, self.user_name, self.last_token) while True: from_tok = StreamToken.from_string(self.last_token) config = PaginationConfig(from_token=from_tok, limit='1') - chunk = yield self.evStreamHandler.get_stream(self.user_name, config, timeout=100*365*24*60*60*1000) + chunk = yield self.evStreamHandler.get_stream( + self.user_name, config, timeout=100*365*24*60*60*1000) - # limiting to 1 may get 1 event plus 1 presence event, so pick out the actual event - singleEvent = None + # limiting to 1 may get 1 event plus 1 presence event, so + # pick out the actual event + single_event = None for c in chunk['chunk']: - if 'event_id' in c: # Hmmm... - singleEvent = c + if 'event_id' in c: # Hmmm... 
+ single_event = c break - if not singleEvent: + if not single_event: continue - ret = yield self.dispatchPush(singleEvent) - if (ret): + ret = yield self.dispatch_push(single_event) + if ret: self.backoff_delay = Pusher.INITIAL_BACKOFF self.last_token = chunk['end'] - self.store.update_pusher_last_token_and_success(self.user_name, self.pushkey, - self.last_token, self.clock.time_msec()) + self.store.update_pusher_last_token_and_success( + self.user_name, + self.pushkey, + self.last_token, + self.clock.time_msec() + ) if self.failing_since: self.failing_since = None - self.store.update_pusher_failing_since(self.user_name, self.pushkey, self.failing_since) + self.store.update_pusher_failing_since( + self.user_name, + self.pushkey, + self.failing_since) else: if not self.failing_since: self.failing_since = self.clock.time_msec() - self.store.update_pusher_failing_since(self.user_name, self.pushkey, self.failing_since) + self.store.update_pusher_failing_since( + self.user_name, + self.pushkey, + self.failing_since + ) - if self.failing_since and self.failing_since < self.clock.time_msec() - Pusher.GIVE_UP_AFTER: - # we really only give up so that if the URL gets fixed, we don't suddenly deliver a load + if self.failing_since and \ + self.failing_since < \ + self.clock.time_msec() - Pusher.GIVE_UP_AFTER: + # we really only give up so that if the URL gets + # fixed, we don't suddenly deliver a load # of old notifications. - logger.warn("Giving up on a notification to user %s, pushkey %s", + logger.warn("Giving up on a notification to user %s, " + "pushkey %s", self.user_name, self.pushkey) self.backoff_delay = Pusher.INITIAL_BACKOFF self.last_token = chunk['end'] - self.store.update_pusher_last_token(self.user_name, self.pushkey, self.last_token) + self.store.update_pusher_last_token( + self.user_name, + self.pushkey, + self.last_token + ) self.failing_since = None - self.store.update_pusher_failing_since(self.user_name, self.pushkey, self.failing_since) + self.store.update_pusher_failing_since( + self.user_name, + self.pushkey, + self.failing_since + ) else: - logger.warn("Failed to dispatch push for user %s (failing for %dms)." + logger.warn("Failed to dispatch push for user %s " + "(failing for %dms)." 
"Trying again in %dms", - self.user_name, - self.clock.time_msec() - self.failing_since, - self.backoff_delay - ) + self.user_name, + self.clock.time_msec() - self.failing_since, + self.backoff_delay + ) yield synapse.util.async.sleep(self.backoff_delay / 1000.0) - self.backoff_delay *=2 + self.backoff_delay *= 2 if self.backoff_delay > Pusher.MAX_BACKOFF: self.backoff_delay = Pusher.MAX_BACKOFF + def dispatch_push(self, p): + pass + class PusherConfigException(Exception): def __init__(self, msg): diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 33d735b974..fd7fe4e39c 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -22,21 +22,28 @@ import logging logger = logging.getLogger(__name__) + class HttpPusher(Pusher): - def __init__(self, _hs, user_name, app, app_display_name, device_display_name, pushkey, data, + def __init__(self, _hs, user_name, app_id, app_instance_id, + app_display_name, device_display_name, pushkey, data, last_token, last_success, failing_since): - super(HttpPusher, self).__init__(_hs, - user_name, - app, - app_display_name, - device_display_name, - pushkey, - data, - last_token, - last_success, - failing_since) + super(HttpPusher, self).__init__( + _hs, + user_name, + app_id, + app_instance_id, + app_display_name, + device_display_name, + pushkey, + data, + last_token, + last_success, + failing_since + ) if 'url' not in data: - raise PusherConfigException("'url' required in data for HTTP pusher") + raise PusherConfigException( + "'url' required in data for HTTP pusher" + ) self.url = data['url'] self.httpCli = SimpleHttpClient(self.hs) self.data_minus_url = {} @@ -53,34 +60,36 @@ class HttpPusher(Pusher): return None return { - 'notification': { - 'transition' : 'new', # everything is new for now: we don't have read receipts - 'id': event['event_id'], - 'type': event['type'], - 'from': event['user_id'], - # we may have to fetch this over federation and we can't trust it anyway: is it worth it? - #'fromDisplayName': 'Steve Stevington' - }, - #'counts': { -- we don't mark messages as read yet so we have no way of knowing - # 'unread': 1, - # 'missedCalls': 2 - # }, - 'devices': { - self.pushkey: { - 'data' : self.data_minus_url + 'notification': { + 'transition': 'new', + # everything is new for now: we don't have read receipts + 'id': event['event_id'], + 'type': event['type'], + 'from': event['user_id'], + # we may have to fetch this over federation and we + # can't trust it anyway: is it worth it? 
+ #'fromDisplayName': 'Steve Stevington' + }, + #'counts': { -- we don't mark messages as read yet so + # we have no way of knowing + # 'unread': 1, + # 'missedCalls': 2 + # }, + 'devices': { + self.pushkey: { + 'data': self.data_minus_url } - } + } } @defer.inlineCallbacks - def dispatchPush(self, event): - notificationDict = self._build_notification_dict(event) - if not notificationDict: + def dispatch_push(self, event): + notification_dict = self._build_notification_dict(event) + if not notification_dict: defer.returnValue(True) try: - yield self.httpCli.post_json_get_json(self.url, notificationDict) + yield self.httpCli.post_json_get_json(self.url, notification_dict) except: logger.exception("Failed to push %s ", self.url) defer.returnValue(False) defer.returnValue(True) - diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 3fa5a4c4ff..045c36f3b7 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -34,13 +34,17 @@ class PusherPool: def start(self): self._pushers_added() - def add_pusher(self, user_name, kind, app, app_display_name, device_display_name, pushkey, data): - # we try to create the pusher just to validate the config: it will then get pulled out of the database, - # recreated, added and started: this means we have only one code path adding pushers. + def add_pusher(self, user_name, kind, app_id, app_instance_id, + app_display_name, device_display_name, pushkey, data): + # we try to create the pusher just to validate the config: it + # will then get pulled out of the database, + # recreated, added and started: this means we have only one + # code path adding pushers. self._create_pusher({ "user_name": user_name, "kind": kind, - "app": app, + "app_id": app_id, + "app_instance_id": app_instance_id, "app_display_name": app_display_name, "device_display_name": device_display_name, "pushkey": pushkey, @@ -49,42 +53,55 @@ class PusherPool: "last_success": None, "failing_since": None }) - self._add_pusher_to_store(user_name, kind, app, app_display_name, device_display_name, pushkey, data) + self._add_pusher_to_store(user_name, kind, app_id, app_instance_id, + app_display_name, device_display_name, + pushkey, data) @defer.inlineCallbacks - def _add_pusher_to_store(self, user_name, kind, app, app_display_name, device_display_name, pushkey, data): - yield self.store.add_pusher(user_name=user_name, - kind=kind, - app=app, - app_display_name=app_display_name, - device_display_name=device_display_name, - pushkey=pushkey, - data=json.dumps(data)) + def _add_pusher_to_store(self, user_name, kind, app_id, app_instance_id, + app_display_name, device_display_name, + pushkey, data): + yield self.store.add_pusher( + user_name=user_name, + kind=kind, + app_id=app_id, + app_instance_id=app_instance_id, + app_display_name=app_display_name, + device_display_name=device_display_name, + pushkey=pushkey, + data=json.dumps(data) + ) self._pushers_added() def _create_pusher(self, pusherdict): if pusherdict['kind'] == 'http': - return HttpPusher(self.hs, - user_name=pusherdict['user_name'], - app=pusherdict['app'], - app_display_name=pusherdict['app_display_name'], - device_display_name=pusherdict['device_display_name'], - pushkey=pusherdict['pushkey'], - data=pusherdict['data'], - last_token=pusherdict['last_token'], - last_success=pusherdict['last_success'], - failing_since=pusherdict['failing_since'] - ) + return HttpPusher( + self.hs, + user_name=pusherdict['user_name'], + app_id=pusherdict['app_id'], + app_instance_id=pusherdict['app_instance_id'], + 
app_display_name=pusherdict['app_display_name'], + device_display_name=pusherdict['device_display_name'], + pushkey=pusherdict['pushkey'], + data=pusherdict['data'], + last_token=pusherdict['last_token'], + last_success=pusherdict['last_success'], + failing_since=pusherdict['failing_since'] + ) else: - raise PusherConfigException("Unknown pusher type '%s' for user %s" % - (pusherdict['kind'], pusherdict['user_name'])) + raise PusherConfigException( + "Unknown pusher type '%s' for user %s" % + (pusherdict['kind'], pusherdict['user_name']) + ) @defer.inlineCallbacks def _pushers_added(self): - pushers = yield self.store.get_all_pushers_after_id(self.last_pusher_started) + pushers = yield self.store.get_all_pushers_after_id( + self.last_pusher_started + ) for p in pushers: p['data'] = json.loads(p['data']) - if (len(pushers)): + if len(pushers): self.last_pusher_started = pushers[-1]['id'] self._start_pushers(pushers) @@ -95,4 +112,4 @@ class PusherPool: p = self._create_pusher(pusherdict) if p: self.pushers.append(p) - p.start() \ No newline at end of file + p.start() diff --git a/synapse/rest/pusher.py b/synapse/rest/pusher.py index 85d0d1c8cd..a39341cd8b 100644 --- a/synapse/rest/pusher.py +++ b/synapse/rest/pusher.py @@ -31,30 +31,37 @@ class PusherRestServlet(RestServlet): content = _parse_json(request) - reqd = ['kind', 'app', 'app_display_name', 'device_display_name', 'data'] + reqd = ['kind', 'app_id', 'app_instance_id', 'app_display_name', + 'device_display_name', 'data'] missing = [] for i in reqd: if i not in content: missing.append(i) if len(missing): - raise SynapseError(400, "Missing parameters: "+','.join(missing), errcode=Codes.MISSING_PARAM) + raise SynapseError(400, "Missing parameters: "+','.join(missing), + errcode=Codes.MISSING_PARAM) pusher_pool = self.hs.get_pusherpool() try: - pusher_pool.add_pusher(user_name=user.to_string(), - kind=content['kind'], - app=content['app'], - app_display_name=content['app_display_name'], - device_display_name=content['device_display_name'], - pushkey=pushkey, - data=content['data']) + pusher_pool.add_pusher( + user_name=user.to_string(), + kind=content['kind'], + app_id=content['app_id'], + app_instance_id=content['app_instance_id'], + app_display_name=content['app_display_name'], + device_display_name=content['device_display_name'], + pushkey=pushkey, + data=content['data'] + ) except PusherConfigException as pce: - raise SynapseError(400, "Config Error: "+pce.message, errcode=Codes.MISSING_PARAM) + raise SynapseError(400, "Config Error: "+pce.message, + errcode=Codes.MISSING_PARAM) defer.returnValue((200, {})) - def on_OPTIONS(self, request): - return (200, {}) + def on_OPTIONS(self, _): + return 200, {} + # XXX: C+ped from rest/room.py - surely this should be common? 
def _parse_json(request): @@ -67,5 +74,6 @@ def _parse_json(request): except ValueError: raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) + def register_servlets(hs, http_server): PusherRestServlet(hs).register(http_server) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index ce158c4b18..a858e46f3b 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -25,11 +25,13 @@ import logging logger = logging.getLogger(__name__) + class PusherStore(SQLBaseStore): @defer.inlineCallbacks def get_all_pushers_after_id(self, min_id): sql = ( - "SELECT id, user_name, kind, app, app_display_name, device_display_name, pushkey, data, " + "SELECT id, user_name, kind, app_id, app_instance_id," + "app_display_name, device_display_name, pushkey, data, " "last_token, last_success, failing_since " "FROM pushers " "WHERE id > ?" @@ -42,14 +44,15 @@ class PusherStore(SQLBaseStore): "id": r[0], "user_name": r[1], "kind": r[2], - "app": r[3], - "app_display_name": r[4], - "device_display_name": r[5], - "pushkey": r[6], - "data": r[7], - "last_token": r[8], - "last_success": r[9], - "failing_since": r[10] + "app_id": r[3], + "app_instance_id": r[4], + "app_display_name": r[5], + "device_display_name": r[6], + "pushkey": r[7], + "data": r[8], + "last_token": r[9], + "last_success": r[10], + "failing_since": r[11] } for r in rows ] @@ -57,12 +60,14 @@ class PusherStore(SQLBaseStore): defer.returnValue(ret) @defer.inlineCallbacks - def add_pusher(self, user_name, kind, app, app_display_name, device_display_name, pushkey, data): + def add_pusher(self, user_name, kind, app_id, app_instance_id, + app_display_name, device_display_name, pushkey, data): try: yield self._simple_insert(PushersTable.table_name, dict( user_name=user_name, kind=kind, - app=app, + app_id=app_id, + app_instance_id=app_instance_id, app_display_name=app_display_name, device_display_name=device_display_name, pushkey=pushkey, @@ -76,23 +81,27 @@ class PusherStore(SQLBaseStore): @defer.inlineCallbacks def update_pusher_last_token(self, user_name, pushkey, last_token): - yield self._simple_update_one(PushersTable.table_name, - {'user_name': user_name, 'pushkey': pushkey}, - {'last_token': last_token} + yield self._simple_update_one( + PushersTable.table_name, + {'user_name': user_name, 'pushkey': pushkey}, + {'last_token': last_token} ) @defer.inlineCallbacks - def update_pusher_last_token_and_success(self, user_name, pushkey, last_token, last_success): - yield self._simple_update_one(PushersTable.table_name, - {'user_name': user_name, 'pushkey': pushkey}, - {'last_token': last_token, 'last_success': last_success} + def update_pusher_last_token_and_success(self, user_name, pushkey, + last_token, last_success): + yield self._simple_update_one( + PushersTable.table_name, + {'user_name': user_name, 'pushkey': pushkey}, + {'last_token': last_token, 'last_success': last_success} ) @defer.inlineCallbacks def update_pusher_failing_since(self, user_name, pushkey, failing_since): - yield self._simple_update_one(PushersTable.table_name, - {'user_name': user_name, 'pushkey': pushkey}, - {'failing_since': failing_since} + yield self._simple_update_one( + PushersTable.table_name, + {'user_name': user_name, 'pushkey': pushkey}, + {'failing_since': failing_since} ) @@ -103,7 +112,8 @@ class PushersTable(Table): "id", "user_name", "kind", - "app" + "app_id", + "app_instance_id", "app_display_name", "device_display_name", "pushkey", diff --git a/synapse/storage/schema/delta/v7.sql 
b/synapse/storage/schema/delta/v7.sql index e83f7e7436..b60aeda756 100644 --- a/synapse/storage/schema/delta/v7.sql +++ b/synapse/storage/schema/delta/v7.sql @@ -17,11 +17,12 @@ CREATE TABLE IF NOT EXISTS pushers ( id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT NOT NULL, kind varchar(8) NOT NULL, - app varchar(64) NOT NULL, + app_id varchar(64) NOT NULL, + app_instance_id varchar(64) NOT NULL, app_display_name varchar(64) NOT NULL, device_display_name varchar(128) NOT NULL, pushkey blob NOT NULL, - data text, + data blob, last_token TEXT, last_success BIGINT, failing_since BIGINT, diff --git a/synapse/storage/schema/pusher.sql b/synapse/storage/schema/pusher.sql index e83f7e7436..b60aeda756 100644 --- a/synapse/storage/schema/pusher.sql +++ b/synapse/storage/schema/pusher.sql @@ -17,11 +17,12 @@ CREATE TABLE IF NOT EXISTS pushers ( id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT NOT NULL, kind varchar(8) NOT NULL, - app varchar(64) NOT NULL, + app_id varchar(64) NOT NULL, + app_instance_id varchar(64) NOT NULL, app_display_name varchar(64) NOT NULL, device_display_name varchar(128) NOT NULL, pushkey blob NOT NULL, - data text, + data blob, last_token TEXT, last_success BIGINT, failing_since BIGINT, -- cgit 1.5.1 From 9728c305a34a1f9546d2ce0ef4c54352dc55a16d Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 18 Dec 2014 14:49:22 +0000 Subject: after a few rethinks, a working implementation of pushers. --- synapse/push/__init__.py | 12 ++++-- synapse/push/httppusher.py | 25 +++++------ synapse/push/pusherpool.py | 47 +++++++++++---------- synapse/rest/pusher.py | 13 +++--- synapse/storage/_base.py | 45 ++++++++++++++++++++ synapse/storage/pusher.py | 83 +++++++++++++++++++++++++------------ synapse/storage/schema/delta/v7.sql | 3 +- synapse/storage/schema/pusher.sql | 3 +- 8 files changed, 158 insertions(+), 73 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 5fca3bd772..5fe8719fe7 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -30,7 +30,7 @@ class Pusher(object): MAX_BACKOFF = 60 * 60 * 1000 GIVE_UP_AFTER = 24 * 60 * 60 * 1000 - def __init__(self, _hs, user_name, app_id, app_instance_id, + def __init__(self, _hs, user_name, app_id, app_display_name, device_display_name, pushkey, data, last_token, last_success, failing_since): self.hs = _hs @@ -39,7 +39,6 @@ class Pusher(object): self.clock = self.hs.get_clock() self.user_name = user_name self.app_id = app_id - self.app_instance_id = app_instance_id self.app_display_name = app_display_name self.device_display_name = device_display_name self.pushkey = pushkey @@ -48,6 +47,7 @@ class Pusher(object): self.last_success = last_success # not actually used self.backoff_delay = Pusher.INITIAL_BACKOFF self.failing_since = failing_since + self.alive = True @defer.inlineCallbacks def start(self): @@ -65,7 +65,7 @@ class Pusher(object): logger.info("Pusher %s for user %s starting from token %s", self.pushkey, self.user_name, self.last_token) - while True: + while self.alive: from_tok = StreamToken.from_string(self.last_token) config = PaginationConfig(from_token=from_tok, limit='1') chunk = yield self.evStreamHandler.get_stream( @@ -81,6 +81,9 @@ class Pusher(object): if not single_event: continue + if not self.alive: + continue + ret = yield self.dispatch_push(single_event) if ret: self.backoff_delay = Pusher.INITIAL_BACKOFF @@ -142,6 +145,9 @@ class Pusher(object): if self.backoff_delay > Pusher.MAX_BACKOFF: self.backoff_delay = Pusher.MAX_BACKOFF 
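
The retry schedule this loop produces can be sketched in isolation (same
constants as defined on Pusher; the real loop measures failing_since against
the wall clock rather than summing delays, so this is approximate):

    INITIAL_BACKOFF = 1000                # 1 second, in milliseconds
    MAX_BACKOFF = 60 * 60 * 1000          # delays are capped at 1 hour
    GIVE_UP_AFTER = 24 * 60 * 60 * 1000   # skip the event after ~24 hours

    delay, waited, attempts = INITIAL_BACKOFF, 0, 0
    while waited < GIVE_UP_AFTER:
        # the pusher sleeps delay/1000.0 seconds, then retries the dispatch
        waited += delay
        attempts += 1
        delay = min(delay * 2, MAX_BACKOFF)

    # Delays run 1s, 2s, 4s, ... 2048s, then sit at the one-hour cap:
    # roughly 35 attempts in total before the event is skipped and the
    # delay resets to INITIAL_BACKOFF.
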
+ def stop(self): + self.alive = False + def dispatch_push(self, p): pass diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index fd7fe4e39c..f94f673391 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -24,14 +24,13 @@ logger = logging.getLogger(__name__) class HttpPusher(Pusher): - def __init__(self, _hs, user_name, app_id, app_instance_id, + def __init__(self, _hs, user_name, app_id, app_display_name, device_display_name, pushkey, data, last_token, last_success, failing_since): super(HttpPusher, self).__init__( _hs, user_name, app_id, - app_instance_id, app_display_name, device_display_name, pushkey, @@ -69,16 +68,18 @@ class HttpPusher(Pusher): # we may have to fetch this over federation and we # can't trust it anyway: is it worth it? #'fromDisplayName': 'Steve Stevington' - }, - #'counts': { -- we don't mark messages as read yet so - # we have no way of knowing - # 'unread': 1, - # 'missedCalls': 2 - # }, - 'devices': { - self.pushkey: { - 'data': self.data_minus_url - } + #'counts': { -- we don't mark messages as read yet so + # we have no way of knowing + # 'unread': 1, + # 'missedCalls': 2 + # }, + 'devices': [ + { + 'app_id': self.app_id, + 'pushkey': self.pushkey, + 'data': self.data_minus_url + } + ] } } diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 045c36f3b7..d34ef3f6cf 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -24,17 +24,23 @@ import json logger = logging.getLogger(__name__) + class PusherPool: def __init__(self, _hs): self.hs = _hs self.store = self.hs.get_datastore() - self.pushers = [] + self.pushers = {} self.last_pusher_started = -1 + @defer.inlineCallbacks def start(self): - self._pushers_added() + pushers = yield self.store.get_all_pushers() + for p in pushers: + p['data'] = json.loads(p['data']) + self._start_pushers(pushers) - def add_pusher(self, user_name, kind, app_id, app_instance_id, + @defer.inlineCallbacks + def add_pusher(self, user_name, kind, app_id, app_display_name, device_display_name, pushkey, data): # we try to create the pusher just to validate the config: it # will then get pulled out of the database, @@ -44,7 +50,6 @@ class PusherPool: "user_name": user_name, "kind": kind, "app_id": app_id, - "app_instance_id": app_instance_id, "app_display_name": app_display_name, "device_display_name": device_display_name, "pushkey": pushkey, @@ -53,25 +58,26 @@ class PusherPool: "last_success": None, "failing_since": None }) - self._add_pusher_to_store(user_name, kind, app_id, app_instance_id, - app_display_name, device_display_name, - pushkey, data) + yield self._add_pusher_to_store( + user_name, kind, app_id, + app_display_name, device_display_name, + pushkey, data + ) @defer.inlineCallbacks - def _add_pusher_to_store(self, user_name, kind, app_id, app_instance_id, + def _add_pusher_to_store(self, user_name, kind, app_id, app_display_name, device_display_name, pushkey, data): yield self.store.add_pusher( user_name=user_name, kind=kind, app_id=app_id, - app_instance_id=app_instance_id, app_display_name=app_display_name, device_display_name=device_display_name, pushkey=pushkey, data=json.dumps(data) ) - self._pushers_added() + self._refresh_pusher((app_id, pushkey)) def _create_pusher(self, pusherdict): if pusherdict['kind'] == 'http': @@ -79,7 +85,6 @@ class PusherPool: self.hs, user_name=pusherdict['user_name'], app_id=pusherdict['app_id'], - app_instance_id=pusherdict['app_instance_id'], app_display_name=pusherdict['app_display_name'], 
device_display_name=pusherdict['device_display_name'], pushkey=pusherdict['pushkey'], @@ -95,21 +100,21 @@ class PusherPool: ) @defer.inlineCallbacks - def _pushers_added(self): - pushers = yield self.store.get_all_pushers_after_id( - self.last_pusher_started + def _refresh_pusher(self, app_id_pushkey): + p = yield self.store.get_pushers_by_app_id_and_pushkey( + app_id_pushkey ) - for p in pushers: - p['data'] = json.loads(p['data']) - if len(pushers): - self.last_pusher_started = pushers[-1]['id'] + p['data'] = json.loads(p['data']) - self._start_pushers(pushers) + self._start_pushers([p]) def _start_pushers(self, pushers): - logger.info("Starting %d pushers", (len(pushers))) + logger.info("Starting %d pushers", len(pushers)) for pusherdict in pushers: p = self._create_pusher(pusherdict) if p: - self.pushers.append(p) + fullid = "%s:%s" % (pusherdict['app_id'], pusherdict['pushkey']) + if fullid in self.pushers: + self.pushers[fullid].stop() + self.pushers[fullid] = p p.start() diff --git a/synapse/rest/pusher.py b/synapse/rest/pusher.py index a39341cd8b..5b371318d0 100644 --- a/synapse/rest/pusher.py +++ b/synapse/rest/pusher.py @@ -23,16 +23,16 @@ import json class PusherRestServlet(RestServlet): - PATTERN = client_path_pattern("/pushers/(?P[\w]*)$") + PATTERN = client_path_pattern("/pushers/set$") @defer.inlineCallbacks - def on_PUT(self, request, pushkey): + def on_POST(self, request): user = yield self.auth.get_user_by_req(request) content = _parse_json(request) - reqd = ['kind', 'app_id', 'app_instance_id', 'app_display_name', - 'device_display_name', 'data'] + reqd = ['kind', 'app_id', 'app_display_name', + 'device_display_name', 'pushkey', 'data'] missing = [] for i in reqd: if i not in content: @@ -43,14 +43,13 @@ class PusherRestServlet(RestServlet): pusher_pool = self.hs.get_pusherpool() try: - pusher_pool.add_pusher( + yield pusher_pool.add_pusher( user_name=user.to_string(), kind=content['kind'], app_id=content['app_id'], - app_instance_id=content['app_instance_id'], app_display_name=content['app_display_name'], device_display_name=content['device_display_name'], - pushkey=pushkey, + pushkey=content['pushkey'], data=content['data'] ) except PusherConfigException as pce: diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 4881f03368..eb8cc4a9f3 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -195,6 +195,51 @@ class SQLBaseStore(object): txn.execute(sql, values.values()) return txn.lastrowid + def _simple_upsert(self, table, keyvalues, values): + """ + :param table: The table to upsert into + :param keyvalues: Dict of the unique key tables and their new values + :param values: Dict of all the nonunique columns and their new values + :return: A deferred + """ + return self.runInteraction( + "_simple_upsert", + self._simple_upsert_txn, table, keyvalues, values + ) + + def _simple_upsert_txn(self, txn, table, keyvalues, values): + # Try to update + sql = "UPDATE %s SET %s WHERE %s" % ( + table, + ", ".join("%s = ?" % (k) for k in values), + " AND ".join("%s = ?" % (k) for k in keyvalues) + ) + sqlargs = values.values() + keyvalues.values() + logger.debug( + "[SQL] %s Args=%s", + sql, sqlargs, + ) + + txn.execute(sql, sqlargs) + if txn.rowcount == 0: + # We didn't update and rows so insert a new one + allvalues = {} + allvalues.update(keyvalues) + allvalues.update(values) + + sql = "INSERT INTO %s (%s) VALUES (%s)" % ( + table, + ", ".join(k for k in allvalues), + ", ".join("?" 
for _ in allvalues) + ) + logger.debug( + "[SQL] %s Args=%s", + sql, keyvalues.values(), + ) + txn.execute(sql, allvalues.values()) + + + def _simple_select_one(self, table, keyvalues, retcols, allow_none=False): """Executes a SELECT query on the named table, which is expected to diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index a858e46f3b..deabd9cd2e 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -28,16 +28,48 @@ logger = logging.getLogger(__name__) class PusherStore(SQLBaseStore): @defer.inlineCallbacks - def get_all_pushers_after_id(self, min_id): + def get_pushers_by_app_id_and_pushkey(self, app_id_and_pushkey): sql = ( - "SELECT id, user_name, kind, app_id, app_instance_id," + "SELECT id, user_name, kind, app_id," "app_display_name, device_display_name, pushkey, data, " "last_token, last_success, failing_since " "FROM pushers " - "WHERE id > ?" + "WHERE app_id = ? AND pushkey = ?" ) - rows = yield self._execute(None, sql, min_id) + rows = yield self._execute( + None, sql, app_id_and_pushkey[0], app_id_and_pushkey[1] + ) + + ret = [ + { + "id": r[0], + "user_name": r[1], + "kind": r[2], + "app_id": r[3], + "app_display_name": r[4], + "device_display_name": r[5], + "pushkey": r[6], + "data": r[7], + "last_token": r[8], + "last_success": r[9], + "failing_since": r[10] + } + for r in rows + ] + + defer.returnValue(ret[0]) + + @defer.inlineCallbacks + def get_all_pushers(self): + sql = ( + "SELECT id, user_name, kind, app_id," + "app_display_name, device_display_name, pushkey, data, " + "last_token, last_success, failing_since " + "FROM pushers" + ) + + rows = yield self._execute(None, sql) ret = [ { @@ -45,14 +77,13 @@ class PusherStore(SQLBaseStore): "user_name": r[1], "kind": r[2], "app_id": r[3], - "app_instance_id": r[4], - "app_display_name": r[5], - "device_display_name": r[6], - "pushkey": r[7], - "data": r[8], - "last_token": r[9], - "last_success": r[10], - "failing_since": r[11] + "app_display_name": r[4], + "device_display_name": r[5], + "pushkey": r[6], + "data": r[7], + "last_token": r[8], + "last_success": r[9], + "failing_since": r[10] } for r in rows ] @@ -60,21 +91,22 @@ class PusherStore(SQLBaseStore): defer.returnValue(ret) @defer.inlineCallbacks - def add_pusher(self, user_name, kind, app_id, app_instance_id, + def add_pusher(self, user_name, kind, app_id, app_display_name, device_display_name, pushkey, data): try: - yield self._simple_insert(PushersTable.table_name, dict( - user_name=user_name, - kind=kind, - app_id=app_id, - app_instance_id=app_instance_id, - app_display_name=app_display_name, - device_display_name=device_display_name, - pushkey=pushkey, - data=data - )) - except IntegrityError: - raise StoreError(409, "Pushkey in use.") + yield self._simple_upsert( + PushersTable.table_name, + dict( + app_id=app_id, + pushkey=pushkey, + ), + dict( + user_name=user_name, + kind=kind, + app_display_name=app_display_name, + device_display_name=device_display_name, + data=data + )) except Exception as e: logger.error("create_pusher with failed: %s", e) raise StoreError(500, "Problem creating pusher.") @@ -113,7 +145,6 @@ class PushersTable(Table): "user_name", "kind", "app_id", - "app_instance_id", "app_display_name", "device_display_name", "pushkey", diff --git a/synapse/storage/schema/delta/v7.sql b/synapse/storage/schema/delta/v7.sql index b60aeda756..799e48d780 100644 --- a/synapse/storage/schema/delta/v7.sql +++ b/synapse/storage/schema/delta/v7.sql @@ -18,7 +18,6 @@ CREATE TABLE IF NOT EXISTS pushers ( user_name 
TEXT NOT NULL, kind varchar(8) NOT NULL, app_id varchar(64) NOT NULL, - app_instance_id varchar(64) NOT NULL, app_display_name varchar(64) NOT NULL, device_display_name varchar(128) NOT NULL, pushkey blob NOT NULL, @@ -27,5 +26,5 @@ CREATE TABLE IF NOT EXISTS pushers ( last_success BIGINT, failing_since BIGINT, FOREIGN KEY(user_name) REFERENCES users(name), - UNIQUE (user_name, pushkey) + UNIQUE (app_id, pushkey) ); diff --git a/synapse/storage/schema/pusher.sql b/synapse/storage/schema/pusher.sql index b60aeda756..799e48d780 100644 --- a/synapse/storage/schema/pusher.sql +++ b/synapse/storage/schema/pusher.sql @@ -18,7 +18,6 @@ CREATE TABLE IF NOT EXISTS pushers ( user_name TEXT NOT NULL, kind varchar(8) NOT NULL, app_id varchar(64) NOT NULL, - app_instance_id varchar(64) NOT NULL, app_display_name varchar(64) NOT NULL, device_display_name varchar(128) NOT NULL, pushkey blob NOT NULL, @@ -27,5 +26,5 @@ CREATE TABLE IF NOT EXISTS pushers ( last_success BIGINT, failing_since BIGINT, FOREIGN KEY(user_name) REFERENCES users(name), - UNIQUE (user_name, pushkey) + UNIQUE (app_id, pushkey) ); -- cgit 1.5.1 From fc7c5e9cd7e0b1e29984233249311abe5cf23735 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 18 Dec 2014 14:51:29 +0000 Subject: Rename the pusher SQL delta to v9 which the next free one --- synapse/storage/schema/delta/v7.sql | 30 ------------------------------ synapse/storage/schema/delta/v9.sql | 30 ++++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 30 deletions(-) delete mode 100644 synapse/storage/schema/delta/v7.sql create mode 100644 synapse/storage/schema/delta/v9.sql (limited to 'synapse/storage') diff --git a/synapse/storage/schema/delta/v7.sql b/synapse/storage/schema/delta/v7.sql deleted file mode 100644 index 799e48d780..0000000000 --- a/synapse/storage/schema/delta/v7.sql +++ /dev/null @@ -1,30 +0,0 @@ -/* Copyright 2014 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ --- Push notification endpoints that users have configured -CREATE TABLE IF NOT EXISTS pushers ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_name TEXT NOT NULL, - kind varchar(8) NOT NULL, - app_id varchar(64) NOT NULL, - app_display_name varchar(64) NOT NULL, - device_display_name varchar(128) NOT NULL, - pushkey blob NOT NULL, - data blob, - last_token TEXT, - last_success BIGINT, - failing_since BIGINT, - FOREIGN KEY(user_name) REFERENCES users(name), - UNIQUE (app_id, pushkey) -); diff --git a/synapse/storage/schema/delta/v9.sql b/synapse/storage/schema/delta/v9.sql new file mode 100644 index 0000000000..799e48d780 --- /dev/null +++ b/synapse/storage/schema/delta/v9.sql @@ -0,0 +1,30 @@ +/* Copyright 2014 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +-- Push notification endpoints that users have configured +CREATE TABLE IF NOT EXISTS pushers ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + kind varchar(8) NOT NULL, + app_id varchar(64) NOT NULL, + app_display_name varchar(64) NOT NULL, + device_display_name varchar(128) NOT NULL, + pushkey blob NOT NULL, + data blob, + last_token TEXT, + last_success BIGINT, + failing_since BIGINT, + FOREIGN KEY(user_name) REFERENCES users(name), + UNIQUE (app_id, pushkey) +); -- cgit 1.5.1 From 173264b656b480a2f3634f49e78fd6093633af56 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 18 Dec 2014 14:53:10 +0000 Subject: ...and bump SCHEMA_VERSION --- synapse/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 642e5e289e..348c3b259c 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -69,7 +69,7 @@ SCHEMAS = [ # Remember to update this number every time an incompatible change is made to # database schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 8 +SCHEMA_VERSION = 9 class _RollbackButIsFineException(Exception): -- cgit 1.5.1 From 4c7ad50f6e50b95dfa9e0961a504e2f0d5b6921a Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 18 Dec 2014 14:55:04 +0000 Subject: Thank you, pyflakes --- synapse/storage/pusher.py | 1 - 1 file changed, 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index deabd9cd2e..9b5170a5f7 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -18,7 +18,6 @@ import collections from ._base import SQLBaseStore, Table from twisted.internet import defer -from sqlite3 import IntegrityError from synapse.api.errors import StoreError import logging -- cgit 1.5.1 From afa953a29301dcae40606171ed4cdac90eefab63 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 18 Dec 2014 15:11:06 +0000 Subject: schema version is now 10 --- synapse/storage/__init__.py | 2 +- synapse/storage/schema/delta/v10.sql | 30 ++++++++++++++++++++++++++++++ synapse/storage/schema/delta/v9.sql | 30 ------------------------------ 3 files changed, 31 insertions(+), 31 deletions(-) create mode 100644 synapse/storage/schema/delta/v10.sql delete mode 100644 synapse/storage/schema/delta/v9.sql (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 348c3b259c..ad1765e04d 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -69,7 +69,7 @@ SCHEMAS = [ # Remember to update this number every time an incompatible change is made to # database schema files, so the users will be informed on server restarts. 
-SCHEMA_VERSION = 9 +SCHEMA_VERSION = 10 class _RollbackButIsFineException(Exception): diff --git a/synapse/storage/schema/delta/v10.sql b/synapse/storage/schema/delta/v10.sql new file mode 100644 index 0000000000..799e48d780 --- /dev/null +++ b/synapse/storage/schema/delta/v10.sql @@ -0,0 +1,30 @@ +/* Copyright 2014 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +-- Push notification endpoints that users have configured +CREATE TABLE IF NOT EXISTS pushers ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + kind varchar(8) NOT NULL, + app_id varchar(64) NOT NULL, + app_display_name varchar(64) NOT NULL, + device_display_name varchar(128) NOT NULL, + pushkey blob NOT NULL, + data blob, + last_token TEXT, + last_success BIGINT, + failing_since BIGINT, + FOREIGN KEY(user_name) REFERENCES users(name), + UNIQUE (app_id, pushkey) +); diff --git a/synapse/storage/schema/delta/v9.sql b/synapse/storage/schema/delta/v9.sql deleted file mode 100644 index 799e48d780..0000000000 --- a/synapse/storage/schema/delta/v9.sql +++ /dev/null @@ -1,30 +0,0 @@ -/* Copyright 2014 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ --- Push notification endpoints that users have configured -CREATE TABLE IF NOT EXISTS pushers ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_name TEXT NOT NULL, - kind varchar(8) NOT NULL, - app_id varchar(64) NOT NULL, - app_display_name varchar(64) NOT NULL, - device_display_name varchar(128) NOT NULL, - pushkey blob NOT NULL, - data blob, - last_token TEXT, - last_success BIGINT, - failing_since BIGINT, - FOREIGN KEY(user_name) REFERENCES users(name), - UNIQUE (app_id, pushkey) -); -- cgit 1.5.1 From 2cb30767fa5e428f82c6c3ebced15d568d671c3c Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 13 Jan 2015 19:48:37 +0000 Subject: Honour the 'rejected' return from push gateways Add a timestamp to push tokens so we know the last time they we got them from the device. Send it to the push gateways so it can determine whether its failure is more recent than the token. Stop and remove pushers that have been rejected. 
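
Concretely, the exchange with a push gateway now looks something like the
following (the URL and all identifiers are invented; the payload shape and
the 'rejected' handling follow httppusher.py in the diff below):

    # POSTed as JSON to the pusher's configured 'url':
    notification = {
        "notification": {
            "transition": "new",
            "id": "$1421166000000:example.org",
            "type": "m.room.message",
            "from": "@alice:example.org",
            "devices": [{
                "app_id": "com.example.alerts",
                "pushkey": "abc123",
                "pushkeyTs": 1421166000,  # seconds: long(pushkey_ts / 1000)
                "data": {},               # the pusher's data minus 'url'
            }],
        },
    }

    # A gateway that no longer recognises the key can answer, e.g.:
    response = {"rejected": ["abc123"]}

dispatch_push now returns the gateway's 'rejected' list (or False if the POST
itself failed), and the Pusher asks the PusherPool to remove any of its own
pushkeys that come back rejected, stopping that pusher and deleting its row.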
---
 synapse/push/__init__.py             | 37 +++++++++++++++++++++++++++++++++---
 synapse/push/httppusher.py           | 15 ++++++++++-----
 synapse/push/pusherpool.py           | 12 ++++++++++++
 synapse/storage/pusher.py            | 34 ++++++++++++++++++++++-----------
 synapse/storage/schema/delta/v10.sql |  1 +
 synapse/storage/schema/pusher.sql    |  1 +
 6 files changed, 81 insertions(+), 19 deletions(-)

(limited to 'synapse/storage')

diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py
index f4795d559c..839f666390 100644
--- a/synapse/push/__init__.py
+++ b/synapse/push/__init__.py
@@ -31,8 +31,8 @@ class Pusher(object):
     GIVE_UP_AFTER = 24 * 60 * 60 * 1000

     def __init__(self, _hs, user_name, app_id,
-                 app_display_name, device_display_name, pushkey, data,
-                 last_token, last_success, failing_since):
+                 app_display_name, device_display_name, pushkey, pushkey_ts,
+                 data, last_token, last_success, failing_since):
         self.hs = _hs
         self.evStreamHandler = self.hs.get_handlers().event_stream_handler
         self.store = self.hs.get_datastore()
@@ -42,6 +42,7 @@ class Pusher(object):
         self.app_display_name = app_display_name
         self.device_display_name = device_display_name
         self.pushkey = pushkey
+        self.pushkey_ts = pushkey_ts
         self.data = data
         self.last_token = last_token
         self.last_success = last_success  # not actually used
@@ -98,9 +99,31 @@ class Pusher(object):
                     processed = False
                     if self._should_notify_for_event(single_event):
-                        processed = yield self.dispatch_push(single_event)
+                        rejected = yield self.dispatch_push(single_event)
+                        if rejected is not False:
+                            processed = True
+                            for pk in rejected:
+                                if pk != self.pushkey:
+                                    # for sanity, we only remove the pushkey if it
+                                    # was the one we actually sent...
+                                    logger.warn(
+                                        "Ignoring rejected pushkey %s because we"
+                                        " didn't send it", pk
+                                    )
+                                else:
+                                    logger.info(
+                                        "Pushkey %s was rejected: removing",
+                                        pk
+                                    )
+                                    yield self.hs.get_pusherpool().remove_pusher(
+                                        self.app_id, pk
+                                    )
                     else:
                         processed = True
+
+                if not self.alive:
+                    continue
+
                 if processed:
                     self.backoff_delay = Pusher.INITIAL_BACKOFF
                     self.last_token = chunk['end']
@@ -165,6 +188,14 @@ class Pusher(object):
         self.alive = False

     def dispatch_push(self, p):
+        """
+        Overridden by implementing classes to actually deliver the notification
+        :param p: The event to notify for as a single event from the event stream
+        :return: If the notification was delivered, an array containing any
+                 pushkeys that were rejected by the push gateway.
+                 False if the notification could not be delivered (i.e.
+                 should be retried).
+ """ pass diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index f94f673391..bcfa06e2ab 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -25,8 +25,8 @@ logger = logging.getLogger(__name__) class HttpPusher(Pusher): def __init__(self, _hs, user_name, app_id, - app_display_name, device_display_name, pushkey, data, - last_token, last_success, failing_since): + app_display_name, device_display_name, pushkey, pushkey_ts, + data, last_token, last_success, failing_since): super(HttpPusher, self).__init__( _hs, user_name, @@ -34,6 +34,7 @@ class HttpPusher(Pusher): app_display_name, device_display_name, pushkey, + pushkey_ts, data, last_token, last_success, @@ -77,6 +78,7 @@ class HttpPusher(Pusher): { 'app_id': self.app_id, 'pushkey': self.pushkey, + 'pushkeyTs': long(self.pushkey_ts / 1000), 'data': self.data_minus_url } ] @@ -87,10 +89,13 @@ class HttpPusher(Pusher): def dispatch_push(self, event): notification_dict = self._build_notification_dict(event) if not notification_dict: - defer.returnValue(True) + defer.returnValue([]) try: - yield self.httpCli.post_json_get_json(self.url, notification_dict) + resp = yield self.httpCli.post_json_get_json(self.url, notification_dict) except: logger.exception("Failed to push %s ", self.url) defer.returnValue(False) - defer.returnValue(True) + rejected = [] + if 'rejected' in resp: + rejected = resp['rejected'] + defer.returnValue(rejected) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index d34ef3f6cf..edddc3003e 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -53,6 +53,7 @@ class PusherPool: "app_display_name": app_display_name, "device_display_name": device_display_name, "pushkey": pushkey, + "pushkey_ts": self.hs.get_clock().time_msec(), "data": data, "last_token": None, "last_success": None, @@ -75,6 +76,7 @@ class PusherPool: app_display_name=app_display_name, device_display_name=device_display_name, pushkey=pushkey, + pushkey_ts=self.hs.get_clock().time_msec(), data=json.dumps(data) ) self._refresh_pusher((app_id, pushkey)) @@ -88,6 +90,7 @@ class PusherPool: app_display_name=pusherdict['app_display_name'], device_display_name=pusherdict['device_display_name'], pushkey=pusherdict['pushkey'], + pushkey_ts=pusherdict['pushkey_ts'], data=pusherdict['data'], last_token=pusherdict['last_token'], last_success=pusherdict['last_success'], @@ -118,3 +121,12 @@ class PusherPool: self.pushers[fullid].stop() self.pushers[fullid] = p p.start() + + @defer.inlineCallbacks + def remove_pusher(self, app_id, pushkey): + fullid = "%s:%s" % (app_id, pushkey) + if fullid in self.pushers: + logger.info("Stopping pusher %s", fullid) + self.pushers[fullid].stop() + del self.pushers[fullid] + yield self.store.delete_pusher_by_app_id_pushkey(app_id, pushkey) \ No newline at end of file diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 9b5170a5f7..bfc4980256 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -30,7 +30,7 @@ class PusherStore(SQLBaseStore): def get_pushers_by_app_id_and_pushkey(self, app_id_and_pushkey): sql = ( "SELECT id, user_name, kind, app_id," - "app_display_name, device_display_name, pushkey, data, " + "app_display_name, device_display_name, pushkey, ts, data, " "last_token, last_success, failing_since " "FROM pushers " "WHERE app_id = ? AND pushkey = ?" 
@@ -49,10 +49,11 @@ class PusherStore(SQLBaseStore):
                 "app_display_name": r[4],
                 "device_display_name": r[5],
                 "pushkey": r[6],
-                "data": r[7],
-                "last_token": r[8],
-                "last_success": r[9],
-                "failing_since": r[10]
+                "pushkey_ts": r[7],
+                "data": r[8],
+                "last_token": r[9],
+                "last_success": r[10],
+                "failing_since": r[11]
             }
             for r in rows
         ]
@@ -63,7 +64,7 @@ class PusherStore(SQLBaseStore):
     def get_all_pushers(self):
         sql = (
             "SELECT id, user_name, kind, app_id,"
-            "app_display_name, device_display_name, pushkey, data, "
+            "app_display_name, device_display_name, pushkey, ts, data, "
             "last_token, last_success, failing_since "
             "FROM pushers"
         )
@@ -79,10 +80,11 @@ class PusherStore(SQLBaseStore):
                 "app_display_name": r[4],
                 "device_display_name": r[5],
                 "pushkey": r[6],
-                "data": r[7],
-                "last_token": r[8],
-                "last_success": r[9],
-                "failing_since": r[10]
+                "pushkey_ts": r[7],
+                "data": r[8],
+                "last_token": r[9],
+                "last_success": r[10],
+                "failing_since": r[11]
             }
             for r in rows
         ]
@@ -91,7 +93,8 @@ class PusherStore(SQLBaseStore):
     @defer.inlineCallbacks
     def add_pusher(self, user_name, kind, app_id,
-                   app_display_name, device_display_name, pushkey, data):
+                   app_display_name, device_display_name,
+                   pushkey, pushkey_ts, data):
         try:
             yield self._simple_upsert(
                 PushersTable.table_name,
@@ -104,12 +107,20 @@ class PusherStore(SQLBaseStore):
                     kind=kind,
                     app_display_name=app_display_name,
                     device_display_name=device_display_name,
+                    ts=pushkey_ts,
                     data=data
                 ))
         except Exception as e:
             logger.error("create_pusher failed: %s", e)
             raise StoreError(500, "Problem creating pusher.")

+    @defer.inlineCallbacks
+    def delete_pusher_by_app_id_pushkey(self, app_id, pushkey):
+        yield self._simple_delete_one(
+            PushersTable.table_name,
+            dict(app_id=app_id, pushkey=pushkey)
+        )
+
     @defer.inlineCallbacks
     def update_pusher_last_token(self, user_name, pushkey, last_token):
         yield self._simple_update_one(
@@ -147,6 +158,7 @@ class PushersTable(Table):
         "app_display_name",
         "device_display_name",
         "pushkey",
+        "pushkey_ts",
         "data",
         "last_token",
         "last_success",
diff --git a/synapse/storage/schema/delta/v10.sql b/synapse/storage/schema/delta/v10.sql
index 799e48d780..a991e4eb11 100644
--- a/synapse/storage/schema/delta/v10.sql
+++ b/synapse/storage/schema/delta/v10.sql
@@ -21,6 +21,7 @@ CREATE TABLE IF NOT EXISTS pushers (
   app_display_name varchar(64) NOT NULL,
   device_display_name varchar(128) NOT NULL,
   pushkey blob NOT NULL,
+  ts BIGINT NOT NULL,
   data blob,
   last_token TEXT,
   last_success BIGINT,
diff --git a/synapse/storage/schema/pusher.sql b/synapse/storage/schema/pusher.sql
index 799e48d780..a991e4eb11 100644
--- a/synapse/storage/schema/pusher.sql
+++ b/synapse/storage/schema/pusher.sql
@@ -21,6 +21,7 @@ CREATE TABLE IF NOT EXISTS pushers (
   app_display_name varchar(64) NOT NULL,
   device_display_name varchar(128) NOT NULL,
   pushkey blob NOT NULL,
+  ts BIGINT NOT NULL,
   data blob,
   last_token TEXT,
   last_success BIGINT,
-- cgit 1.5.1


From 2ca2dbc82183f7dbe8c01694bf1c32a8c4c4b9de Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 15 Jan 2015 16:56:18 +0000
Subject: Send room name and first alias in notification poke.
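The poke gains two optional context fields. As a small illustrative
sketch of the shaping done below (the ctx values are made-up examples;
the key names match the patch, and ctx mirrors what the new
get_context_for_event returns):

    # Given the (name, aliases) context fetched for the event's room,
    # the HTTP pusher decorates the notification dict before POSTing it:
    ctx = {"name": "Example Room", "aliases": ["#example:example.com"]}
    d = {"notification": {"transition": "new"}}
    if len(ctx["aliases"]):
        d["notification"]["roomAlias"] = ctx["aliases"][0]  # first alias only
    if "name" in ctx:
        d["notification"]["roomName"] = ctx["name"]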
---
 synapse/push/__init__.py    | 13 +++++++++++++
 synapse/push/httppusher.py  | 16 +++++++++++++---
 synapse/storage/__init__.py | 35 +++++++++++++++++++++++++++++++++++
 3 files changed, 61 insertions(+), 3 deletions(-)

(limited to 'synapse/storage')

diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py
index 9cf996fb80..5f4e833add 100644
--- a/synapse/push/__init__.py
+++ b/synapse/push/__init__.py
@@ -61,6 +61,19 @@ class Pusher(object):
                 return False
         return True

+    @defer.inlineCallbacks
+    def get_context_for_event(self, ev):
+        name_aliases = yield self.store.get_room_name_and_aliases(
+            ev['room_id']
+        )
+
+        ctx = {'aliases': name_aliases[1]}
+        if name_aliases[0] is not None:
+            ctx['name'] = name_aliases[0]
+
+        defer.returnValue(ctx)
+
+
     @defer.inlineCallbacks
     def start(self):
         if not self.last_token:
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index bcfa06e2ab..7631a741fa 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -50,6 +50,7 @@ class HttpPusher(Pusher):
         self.data_minus_url.update(self.data)
         del self.data_minus_url['url']

+    @defer.inlineCallbacks
     def _build_notification_dict(self, event):
         # we probably do not want to push for every presence update
         # (we may want to be able to set up notifications when specific
         # Actually, presence events will not get this far now because we
         # need to filter them out in the main Pusher code.
         if 'event_id' not in event:
-            return None
+            defer.returnValue(None)
+
+        ctx = yield self.get_context_for_event(event)

-        return {
+        d = {
             'notification': {
                 'transition': 'new',
                 # everything is new for now: we don't have read receipts
@@ -85,9 +88,16 @@ class HttpPusher(Pusher):
             }
         }

+        if len(ctx['aliases']):
+            d['notification']['roomAlias'] = ctx['aliases'][0]
+        if 'name' in ctx:
+            d['notification']['roomName'] = ctx['name']
+
+        defer.returnValue(d)
+
     @defer.inlineCallbacks
     def dispatch_push(self, event):
-        notification_dict = self._build_notification_dict(event)
+        notification_dict = yield self._build_notification_dict(event)
         if not notification_dict:
             defer.returnValue([])
         try:
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index fa7ad0eea8..191fe462a5 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -384,6 +384,41 @@ class DataStore(RoomMemberStore, RoomStore,
         events = yield self._parse_events(results)
         defer.returnValue(events)

+    @defer.inlineCallbacks
+    def get_room_name_and_aliases(self, room_id):
+        del_sql = (
+            "SELECT event_id FROM redactions WHERE redacts = e.event_id "
+            "LIMIT 1"
+        )
+
+        sql = (
+            "SELECT e.*, (%(redacted)s) AS redacted FROM events as e "
+            "INNER JOIN current_state_events as c ON e.event_id = c.event_id "
+            "INNER JOIN state_events as s ON e.event_id = s.event_id "
+            "WHERE c.room_id = ? "
+        ) % {
+            "redacted": del_sql,
+        }
+
+        sql += " AND ((s.type = 'm.room.name' AND s.state_key = '')"
+        sql += " OR s.type = 'm.room.aliases')"
+        args = (room_id,)
+
+        results = yield self._execute_and_decode(sql, *args)
+
+        events = yield self._parse_events(results)
+
+        name = None
+        aliases = []
+
+        for e in events:
+            if e.type == 'm.room.name':
+                name = e.content['name']
+            elif e.type == 'm.room.aliases':
+                aliases.extend(e.content['aliases'])
+
+        defer.returnValue((name, aliases))
+
     @defer.inlineCallbacks
     def _get_min_token(self):
         row = yield self._execute(
-- cgit 1.5.1


From 2d2953cf5fce26625e56fc1abc230735d007ea1e Mon Sep 17 00:00:00 2001
From: David Baker
Date: Fri, 16 Jan 2015 11:24:10 +0000
Subject: Require device language when adding a pusher.

Because this seems like it might be useful to do sooner rather than
later.

---
 synapse/push/pusherpool.py           | 8 +++++---
 synapse/rest/pusher.py               | 3 ++-
 synapse/storage/pusher.py            | 3 ++-
 synapse/storage/schema/delta/v10.sql | 1 +
 synapse/storage/schema/pusher.sql    | 1 +
 5 files changed, 11 insertions(+), 5 deletions(-)

(limited to 'synapse/storage')

diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index edddc3003e..8c77f4b668 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -41,7 +41,7 @@ class PusherPool:

     @defer.inlineCallbacks
     def add_pusher(self, user_name, kind, app_id,
-                   app_display_name, device_display_name, pushkey, data):
+                   app_display_name, device_display_name, pushkey, lang, data):
         # we try to create the pusher just to validate the config: it
         # will then get pulled out of the database,
         # recreated, added and started: this means we have only one
@@ -54,6 +54,7 @@ class PusherPool:
             "device_display_name": device_display_name,
             "pushkey": pushkey,
             "pushkey_ts": self.hs.get_clock().time_msec(),
+            "lang": lang,
             "data": data,
             "last_token": None,
             "last_success": None,
@@ -62,13 +63,13 @@ class PusherPool:
         yield self._add_pusher_to_store(
             user_name, kind, app_id,
             app_display_name, device_display_name,
-            pushkey, data
+            pushkey, lang, data
         )

     @defer.inlineCallbacks
     def _add_pusher_to_store(self, user_name, kind, app_id,
                              app_display_name, device_display_name,
-                             pushkey, data):
+                             pushkey, lang, data):
         yield self.store.add_pusher(
             user_name=user_name,
             kind=kind,
@@ -77,6 +78,7 @@ class PusherPool:
             device_display_name=device_display_name,
             pushkey=pushkey,
             pushkey_ts=self.hs.get_clock().time_msec(),
+            lang=lang,
             data=json.dumps(data)
         )
         self._refresh_pusher((app_id, pushkey))
diff --git a/synapse/rest/pusher.py b/synapse/rest/pusher.py
index 5b371318d0..6b9a59adb6 100644
--- a/synapse/rest/pusher.py
+++ b/synapse/rest/pusher.py
@@ -32,7 +32,7 @@ class PusherRestServlet(RestServlet):
         content = _parse_json(request)

         reqd = ['kind', 'app_id', 'app_display_name',
-                'device_display_name', 'pushkey', 'data']
+                'device_display_name', 'pushkey', 'lang', 'data']
         missing = []
         for i in reqd:
             if i not in content:
@@ -50,6 +50,7 @@ class PusherRestServlet(RestServlet):
                 app_display_name=content['app_display_name'],
                 device_display_name=content['device_display_name'],
                 pushkey=content['pushkey'],
+                lang=content['lang'],
                 data=content['data']
             )
         except PusherConfigException as pce:
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index bfc4980256..4eb30c7bdf 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -94,7 +94,7 @@ class PusherStore(SQLBaseStore):
     @defer.inlineCallbacks
     def add_pusher(self, user_name, kind, app_id,
                    app_display_name, device_display_name,
-                   pushkey, pushkey_ts, data):
+                   pushkey, pushkey_ts, lang, data):
         try:
             yield self._simple_upsert(
                 PushersTable.table_name,
@@ -108,6 +108,7 @@ class PusherStore(SQLBaseStore):
                     app_display_name=app_display_name,
                     device_display_name=device_display_name,
                     ts=pushkey_ts,
+                    lang=lang,
                     data=data
                 ))
         except Exception as e:
diff --git a/synapse/storage/schema/delta/v10.sql b/synapse/storage/schema/delta/v10.sql
index a991e4eb11..689d2dff8b 100644
--- a/synapse/storage/schema/delta/v10.sql
+++ b/synapse/storage/schema/delta/v10.sql
@@ -22,6 +22,7 @@ CREATE TABLE IF NOT EXISTS pushers (
   device_display_name varchar(128) NOT NULL,
   pushkey blob NOT NULL,
   ts BIGINT NOT NULL,
+  lang varchar(8),
   data blob,
   last_token TEXT,
   last_success BIGINT,
diff --git a/synapse/storage/schema/pusher.sql b/synapse/storage/schema/pusher.sql
index a991e4eb11..689d2dff8b 100644
--- a/synapse/storage/schema/pusher.sql
+++ b/synapse/storage/schema/pusher.sql
@@ -22,6 +22,7 @@ CREATE TABLE IF NOT EXISTS pushers (
   device_display_name varchar(128) NOT NULL,
   pushkey blob NOT NULL,
   ts BIGINT NOT NULL,
+  lang varchar(8),
   data blob,
   last_token TEXT,
   last_success BIGINT,
-- cgit 1.5.1


From afb714f7bebf88ac27eac018cffa2078e2723310 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 20 Jan 2015 11:49:48 +0000
Subject: add instance_handles to pushers so we have a way to refer to them
 even if the push token changes.

---
 synapse/push/__init__.py             |  3 ++-
 synapse/push/httppusher.py           |  3 ++-
 synapse/push/pusherpool.py           |  9 ++++---
 synapse/rest/pusher.py               |  3 ++-
 synapse/storage/pusher.py            | 46 ++++++++++++++++++++++----------------
 synapse/storage/schema/delta/v10.sql |  1 +
 synapse/storage/schema/pusher.sql    |  1 +
 7 files changed, 39 insertions(+), 27 deletions(-)

(limited to 'synapse/storage')

diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py
index 5f4e833add..3ee652f3bc 100644
--- a/synapse/push/__init__.py
+++ b/synapse/push/__init__.py
@@ -30,13 +30,14 @@ class Pusher(object):
     MAX_BACKOFF = 60 * 60 * 1000
     GIVE_UP_AFTER = 24 * 60 * 60 * 1000

-    def __init__(self, _hs, user_name, app_id,
+    def __init__(self, _hs, instance_handle, user_name, app_id,
                  app_display_name, device_display_name, pushkey, pushkey_ts,
                  data, last_token, last_success, failing_since):
         self.hs = _hs
         self.evStreamHandler = self.hs.get_handlers().event_stream_handler
         self.store = self.hs.get_datastore()
         self.clock = self.hs.get_clock()
+        self.instance_handle = instance_handle
         self.user_name = user_name
         self.app_id = app_id
         self.app_display_name = app_display_name
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 7631a741fa..9a3e0be15e 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -24,11 +24,12 @@ logger = logging.getLogger(__name__)

 class HttpPusher(Pusher):
-    def __init__(self, _hs, user_name, app_id,
+    def __init__(self, _hs, instance_handle, user_name, app_id,
                  app_display_name, device_display_name, pushkey, pushkey_ts,
                  data, last_token, last_success, failing_since):
         super(HttpPusher, self).__init__(
             _hs,
+            instance_handle,
             user_name,
             app_id,
             app_display_name,
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 8c77f4b668..2dfecf178b 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -40,7 +40,7 @@ class PusherPool:
         self._start_pushers(pushers)

     @defer.inlineCallbacks
-    def add_pusher(self, user_name, kind, app_id,
+    def add_pusher(self, user_name, instance_handle, kind, app_id,
                    app_display_name, device_display_name, pushkey, lang, data):
         # we try to create the pusher just to validate
the config: it # will then get pulled out of the database, @@ -49,6 +49,7 @@ class PusherPool: self._create_pusher({ "user_name": user_name, "kind": kind, + "instance_handle": instance_handle, "app_id": app_id, "app_display_name": app_display_name, "device_display_name": device_display_name, @@ -61,17 +62,18 @@ class PusherPool: "failing_since": None }) yield self._add_pusher_to_store( - user_name, kind, app_id, + user_name, instance_handle, kind, app_id, app_display_name, device_display_name, pushkey, lang, data ) @defer.inlineCallbacks - def _add_pusher_to_store(self, user_name, kind, app_id, + def _add_pusher_to_store(self, user_name, instance_handle, kind, app_id, app_display_name, device_display_name, pushkey, lang, data): yield self.store.add_pusher( user_name=user_name, + instance_handle=instance_handle, kind=kind, app_id=app_id, app_display_name=app_display_name, @@ -87,6 +89,7 @@ class PusherPool: if pusherdict['kind'] == 'http': return HttpPusher( self.hs, + instance_handle=pusherdict['instance_handle'], user_name=pusherdict['user_name'], app_id=pusherdict['app_id'], app_display_name=pusherdict['app_display_name'], diff --git a/synapse/rest/pusher.py b/synapse/rest/pusher.py index 6b9a59adb6..4659c9b1d9 100644 --- a/synapse/rest/pusher.py +++ b/synapse/rest/pusher.py @@ -31,7 +31,7 @@ class PusherRestServlet(RestServlet): content = _parse_json(request) - reqd = ['kind', 'app_id', 'app_display_name', + reqd = ['instance_handle', 'kind', 'app_id', 'app_display_name', 'device_display_name', 'pushkey', 'lang', 'data'] missing = [] for i in reqd: @@ -45,6 +45,7 @@ class PusherRestServlet(RestServlet): try: yield pusher_pool.add_pusher( user_name=user.to_string(), + instance_handle=content['instance_handle'], kind=content['kind'], app_id=content['app_id'], app_display_name=content['app_display_name'], diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 4eb30c7bdf..113cdc8a8e 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -29,7 +29,7 @@ class PusherStore(SQLBaseStore): @defer.inlineCallbacks def get_pushers_by_app_id_and_pushkey(self, app_id_and_pushkey): sql = ( - "SELECT id, user_name, kind, app_id," + "SELECT id, user_name, kind, instance_handle, app_id," "app_display_name, device_display_name, pushkey, ts, data, " "last_token, last_success, failing_since " "FROM pushers " @@ -45,15 +45,16 @@ class PusherStore(SQLBaseStore): "id": r[0], "user_name": r[1], "kind": r[2], - "app_id": r[3], - "app_display_name": r[4], - "device_display_name": r[5], - "pushkey": r[6], - "pushkey_ts": r[7], - "data": r[8], - "last_token": r[9], - "last_success": r[10], - "failing_since": r[11] + "instance_handle": r[3], + "app_id": r[4], + "app_display_name": r[5], + "device_display_name": r[6], + "pushkey": r[7], + "pushkey_ts": r[8], + "data": r[9], + "last_token": r[10], + "last_success": r[11], + "failing_since": r[12] } for r in rows ] @@ -63,7 +64,7 @@ class PusherStore(SQLBaseStore): @defer.inlineCallbacks def get_all_pushers(self): sql = ( - "SELECT id, user_name, kind, app_id," + "SELECT id, user_name, kind, instance_handle, app_id," "app_display_name, device_display_name, pushkey, ts, data, " "last_token, last_success, failing_since " "FROM pushers" @@ -76,15 +77,16 @@ class PusherStore(SQLBaseStore): "id": r[0], "user_name": r[1], "kind": r[2], - "app_id": r[3], - "app_display_name": r[4], - "device_display_name": r[5], - "pushkey": r[6], - "pushkey_ts": r[7], - "data": r[8], - "last_token": r[9], - "last_success": r[10], - "failing_since": r[11] + 
"instance_handle": r[3], + "app_id": r[4], + "app_display_name": r[5], + "device_display_name": r[6], + "pushkey": r[7], + "pushkey_ts": r[8], + "data": r[9], + "last_token": r[10], + "last_success": r[11], + "failing_since": r[12] } for r in rows ] @@ -92,7 +94,7 @@ class PusherStore(SQLBaseStore): defer.returnValue(ret) @defer.inlineCallbacks - def add_pusher(self, user_name, kind, app_id, + def add_pusher(self, user_name, instance_handle, kind, app_id, app_display_name, device_display_name, pushkey, pushkey_ts, lang, data): try: @@ -105,6 +107,7 @@ class PusherStore(SQLBaseStore): dict( user_name=user_name, kind=kind, + instance_handle=instance_handle, app_display_name=app_display_name, device_display_name=device_display_name, ts=pushkey_ts, @@ -155,6 +158,7 @@ class PushersTable(Table): "id", "user_name", "kind", + "instance_handle", "app_id", "app_display_name", "device_display_name", diff --git a/synapse/storage/schema/delta/v10.sql b/synapse/storage/schema/delta/v10.sql index 689d2dff8b..b84ce20ef3 100644 --- a/synapse/storage/schema/delta/v10.sql +++ b/synapse/storage/schema/delta/v10.sql @@ -16,6 +16,7 @@ CREATE TABLE IF NOT EXISTS pushers ( id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT NOT NULL, + instance_handle varchar(32) NOT NULL, kind varchar(8) NOT NULL, app_id varchar(64) NOT NULL, app_display_name varchar(64) NOT NULL, diff --git a/synapse/storage/schema/pusher.sql b/synapse/storage/schema/pusher.sql index 689d2dff8b..b84ce20ef3 100644 --- a/synapse/storage/schema/pusher.sql +++ b/synapse/storage/schema/pusher.sql @@ -16,6 +16,7 @@ CREATE TABLE IF NOT EXISTS pushers ( id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT NOT NULL, + instance_handle varchar(32) NOT NULL, kind varchar(8) NOT NULL, app_id varchar(64) NOT NULL, app_display_name varchar(64) NOT NULL, -- cgit 1.5.1 From b1b85753d759f7127fbb1c4a95005fffd3da7f4d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 22 Jan 2015 15:50:17 +0000 Subject: Add support for storing rejected events in EventContext and data stores --- synapse/events/snapshot.py | 1 + synapse/storage/__init__.py | 11 ++++++++--- synapse/storage/_base.py | 21 +++++++++++++-------- synapse/storage/rejections.py | 33 +++++++++++++++++++++++++++++++++ synapse/storage/schema/delta/v12.sql | 21 +++++++++++++++++++++ synapse/storage/schema/im.sql | 7 +++++++ 6 files changed, 83 insertions(+), 11 deletions(-) create mode 100644 synapse/storage/rejections.py create mode 100644 synapse/storage/schema/delta/v12.sql (limited to 'synapse/storage') diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 6bbba8d6ba..7e98bdef28 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -20,3 +20,4 @@ class EventContext(object): self.current_state = current_state self.auth_events = auth_events self.state_group = None + self.rejected = False diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 4beb951b9f..015fcc8775 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -30,6 +30,7 @@ from .transactions import TransactionStore from .keys import KeyStore from .event_federation import EventFederationStore from .media_repository import MediaRepositoryStore +from .rejections import RejectionsStore from .state import StateStore from .signatures import SignatureStore @@ -66,7 +67,7 @@ SCHEMAS = [ # Remember to update this number every time an incompatible change is made to # database schema files, so the users will be informed on server restarts. 
-SCHEMA_VERSION = 11 +SCHEMA_VERSION = 12 class _RollbackButIsFineException(Exception): @@ -82,6 +83,7 @@ class DataStore(RoomMemberStore, RoomStore, DirectoryStore, KeyStore, StateStore, SignatureStore, EventFederationStore, MediaRepositoryStore, + RejectionsStore, ): def __init__(self, hs): @@ -224,6 +226,9 @@ class DataStore(RoomMemberStore, RoomStore, if not outlier: self._store_state_groups_txn(txn, event, context) + if context.rejected: + self._store_rejections_txn(txn, event.event_id, context.rejected) + if current_state: txn.execute( "DELETE FROM current_state_events WHERE room_id = ?", @@ -262,7 +267,7 @@ class DataStore(RoomMemberStore, RoomStore, or_replace=True, ) - if is_new_state: + if is_new_state and not context.rejected: self._simple_insert_txn( txn, "current_state_events", @@ -288,7 +293,7 @@ class DataStore(RoomMemberStore, RoomStore, or_ignore=True, ) - if not backfilled: + if not backfilled and not context.rejected: self._simple_insert_txn( txn, table="state_forward_extremities", diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index f660fc6eaf..2075a018b2 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -458,10 +458,12 @@ class SQLBaseStore(object): return [e for e in events if e] def _get_event_txn(self, txn, event_id, check_redacted=True, - get_prev_content=False): + get_prev_content=False, allow_rejected=False): sql = ( - "SELECT internal_metadata, json, r.event_id FROM event_json as e " + "SELECT internal_metadata, json, r.event_id, reason " + "FROM event_json as e " "LEFT JOIN redactions as r ON e.event_id = r.redacts " + "LEFT JOIN rejections as rej on rej.event_id = e.event_id " "WHERE e.event_id = ? " "LIMIT 1 " ) @@ -473,13 +475,16 @@ class SQLBaseStore(object): if not res: return None - internal_metadata, js, redacted = res + internal_metadata, js, redacted, rejected_reason = res - return self._get_event_from_row_txn( - txn, internal_metadata, js, redacted, - check_redacted=check_redacted, - get_prev_content=get_prev_content, - ) + if allow_rejected or not rejected_reason: + return self._get_event_from_row_txn( + txn, internal_metadata, js, redacted, + check_redacted=check_redacted, + get_prev_content=get_prev_content, + ) + else: + return None def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted, check_redacted=True, get_prev_content=False): diff --git a/synapse/storage/rejections.py b/synapse/storage/rejections.py new file mode 100644 index 0000000000..7d38b31f44 --- /dev/null +++ b/synapse/storage/rejections.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2014, 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from ._base import SQLBaseStore
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class RejectionsStore(SQLBaseStore):
+    def _store_rejections_txn(self, txn, event_id, reason):
+        self._simple_insert_txn(
+            txn,
+            table="rejections",
+            values={
+                "event_id": event_id,
+                "reason": reason,
+                "last_check": self._clock.time_msec(),
+            }
+        )
diff --git a/synapse/storage/schema/delta/v12.sql b/synapse/storage/schema/delta/v12.sql
new file mode 100644
index 0000000000..bd2a8b1bb5
--- /dev/null
+++ b/synapse/storage/schema/delta/v12.sql
@@ -0,0 +1,21 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS rejections(
+    event_id TEXT NOT NULL,
+    reason TEXT NOT NULL,
+    last_check TEXT NOT NULL,
+    CONSTRAINT ev_id UNIQUE (event_id) ON CONFLICT REPLACE
+);
diff --git a/synapse/storage/schema/im.sql b/synapse/storage/schema/im.sql
index dd00c1cd2f..bc7c6b6ed5 100644
--- a/synapse/storage/schema/im.sql
+++ b/synapse/storage/schema/im.sql
@@ -123,3 +123,10 @@ CREATE TABLE IF NOT EXISTS room_hosts(
 );

 CREATE INDEX IF NOT EXISTS room_hosts_room_id ON room_hosts (room_id);
+
+CREATE TABLE IF NOT EXISTS rejections(
+    event_id TEXT NOT NULL,
+    reason TEXT NOT NULL,
+    last_check TEXT NOT NULL,
+    CONSTRAINT ev_id UNIQUE (event_id) ON CONFLICT REPLACE
+);
-- cgit 1.5.1


From dc93860619d56e88844e91f38f66341a32e4c704 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 22 Jan 2015 17:37:12 +0000
Subject: Add rest API & store for creating push rules

Also make unrecognised request error look more like synapse errors
because it makes it easier to throw them from within rest classes.

---
 synapse/rest/push_rule.py    | 195 ++++++++++++++++++++++++++++++++++++++++++
 synapse/storage/push_rule.py | 196 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 391 insertions(+)
 create mode 100644 synapse/rest/push_rule.py
 create mode 100644 synapse/storage/push_rule.py

(limited to 'synapse/storage')

diff --git a/synapse/rest/push_rule.py b/synapse/rest/push_rule.py
new file mode 100644
index 0000000000..b5e74479cf
--- /dev/null
+++ b/synapse/rest/push_rule.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError
+from base import RestServlet, client_path_pattern
+from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException
+
+import json
+
+
+class PushRuleRestServlet(RestServlet):
+    PATTERN = client_path_pattern("/pushrules/.*$")
+
+    def rule_spec_from_path(self, path):
+        if len(path) < 2:
+            raise UnrecognizedRequestError()
+        if path[0] != 'pushrules':
+            raise UnrecognizedRequestError()
+
+        scope = path[1]
+        path = path[2:]
+        if scope not in ['global', 'device']:
+            raise UnrecognizedRequestError()
+
+        device = None
+        if scope == 'device':
+            if len(path) == 0:
+                raise UnrecognizedRequestError()
+            device = path[0]
+            path = path[1:]
+
+        if len(path) == 0:
+            raise UnrecognizedRequestError()
+
+        template = path[0]
+        path = path[1:]
+
+        if len(path) == 0:
+            raise UnrecognizedRequestError()
+
+        rule_id = path[0]
+
+        spec = {
+            'scope': scope,
+            'template': template,
+            'rule_id': rule_id
+        }
+        if device:
+            spec['device'] = device
+        return spec
+
+    def rule_tuple_from_request_object(self, rule_template, rule_id, req_obj):
+        if rule_template in ['override', 'underride']:
+            if 'conditions' not in req_obj:
+                raise InvalidRuleException("Missing 'conditions'")
+            conditions = req_obj['conditions']
+            for c in conditions:
+                if 'kind' not in c:
+                    raise InvalidRuleException("Condition without 'kind'")
+        elif rule_template == 'room':
+            conditions = [{
+                'kind': 'event_match',
+                'key': 'room_id',
+                'pattern': rule_id
+            }]
+        elif rule_template == 'sender':
+            conditions = [{
+                'kind': 'event_match',
+                'key': 'user_id',
+                'pattern': rule_id
+            }]
+        elif rule_template == 'content':
+            if 'pattern' not in req_obj:
+                raise InvalidRuleException("Content rule missing 'pattern'")
+            conditions = [{
+                'kind': 'event_match',
+                'key': 'content.body',
+                'pattern': req_obj['pattern']
+            }]
+        else:
+            raise InvalidRuleException("Unknown rule template: %s" % (rule_template))
+
+        if 'actions' not in req_obj:
+            raise InvalidRuleException("No actions found")
+        actions = req_obj['actions']
+
+        for a in actions:
+            if a in ['notify', 'dont-notify', 'coalesce']:
+                pass
+            elif isinstance(a, dict) and 'set_sound' in a:
+                pass
+            else:
+                raise InvalidRuleException("Unrecognised action")
+
+        return (conditions, actions)
+
+    def priority_class_from_spec(self, spec):
+        map = {
+            'underride': 0,
+            'sender': 1,
+            'room': 2,
+            'content': 3,
+            'override': 4
+        }
+
+        if spec['template'] not in map.keys():
+            raise InvalidRuleException("Unknown template: %s" % (spec['template']))
+        pc = map[spec['template']]
+
+        if spec['scope'] == 'device':
+            pc += 5
+
+        return pc
+
+    @defer.inlineCallbacks
+    def on_PUT(self, request):
+        spec = self.rule_spec_from_path(request.postpath)
+        try:
+            priority_class = self.priority_class_from_spec(spec)
+        except InvalidRuleException as e:
+            raise SynapseError(400, e.message)
+
+        user = yield self.auth.get_user_by_req(request)
+
+        content = _parse_json(request)
+
+        try:
+            (conditions, actions) = self.rule_tuple_from_request_object(
+                spec['template'],
+                spec['rule_id'],
+                content
+            )
+        except InvalidRuleException as e:
+            raise SynapseError(400, e.message)
+
+        before = request.args.get("before", None)
+        if before and len(before):
+            before = before[0]
+        after = request.args.get("after", None)
+        if after and len(after):
+            after = after[0]
+
+        try:
+            yield self.hs.get_datastore().add_push_rule(
+                user_name=user.to_string(),
+                rule_id=spec['rule_id'],
+                priority_class=priority_class,
conditions=conditions, + actions=actions, + before=before, + after=after + ) + except InconsistentRuleException as e: + raise SynapseError(400, e.message) + except RuleNotFoundException: + raise SynapseError(400, "before/after rule not found") + + defer.returnValue((200, {})) + + def on_OPTIONS(self, _): + return 200, {} + + +class InvalidRuleException(Exception): + pass + + +# XXX: C+ped from rest/room.py - surely this should be common? +def _parse_json(request): + try: + content = json.loads(request.content.read()) + if type(content) != dict: + raise SynapseError(400, "Content must be a JSON object.", + errcode=Codes.NOT_JSON) + return content + except ValueError: + raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) + + +def register_servlets(hs, http_server): + PushRuleRestServlet(hs).register(http_server) diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py new file mode 100644 index 0000000000..76c4557600 --- /dev/null +++ b/synapse/storage/push_rule.py @@ -0,0 +1,196 @@ +# -*- coding: utf-8 -*- +# Copyright 2014 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections + +from ._base import SQLBaseStore, Table +from twisted.internet import defer + +import logging +import copy +import json + +logger = logging.getLogger(__name__) + + +class PushRuleStore(SQLBaseStore): + @defer.inlineCallbacks + def get_push_rules_for_user_name(self, user_name): + sql = ( + "SELECT "+",".join(PushRuleTable.fields)+ + "FROM pushers " + "WHERE user_name = ?" + ) + + rows = yield self._execute(None, sql, user_name) + + dicts = [] + for r in rows: + d = {} + for i, f in enumerate(PushRuleTable.fields): + d[f] = r[i] + dicts.append(d) + + defer.returnValue(dicts) + + @defer.inlineCallbacks + def add_push_rule(self, **kwargs): + vals = copy.copy(kwargs) + if 'conditions' in vals: + vals['conditions'] = json.dumps(vals['conditions']) + if 'actions' in vals: + vals['actions'] = json.dumps(vals['actions']) + # we could check the rest of the keys are valid column names + # but sqlite will do that anyway so I think it's just pointless. + if 'id' in vals: + del vals['id'] + + if 'after' in kwargs or 'before' in kwargs: + ret = yield self.runInteraction( + "_add_push_rule_relative_txn", + self._add_push_rule_relative_txn, + **vals + ) + defer.returnValue(ret) + else: + ret = yield self.runInteraction( + "_add_push_rule_highest_priority_txn", + self._add_push_rule_highest_priority_txn, + **vals + ) + defer.returnValue(ret) + + def _add_push_rule_relative_txn(self, txn, user_name, **kwargs): + after = None + relative_to_rule = None + if 'after' in kwargs and kwargs['after']: + after = kwargs['after'] + relative_to_rule = after + if 'before' in kwargs and kwargs['before']: + relative_to_rule = kwargs['before'] + + # get the priority of the rule we're inserting after/before + sql = ( + "SELECT priority_class, priority FROM "+PushRuleTable.table_name+ + " WHERE user_name = ? and rule_id = ?" 
+ ) + txn.execute(sql, (user_name, relative_to_rule)) + res = txn.fetchall() + if not res: + raise RuleNotFoundException() + (priority_class, base_rule_priority) = res[0] + + if 'priority_class' in kwargs and kwargs['priority_class'] != priority_class: + raise InconsistentRuleException( + "Given priority class does not match class of relative rule" + ) + + new_rule = copy.copy(kwargs) + if 'before' in new_rule: + del new_rule['before'] + if 'after' in new_rule: + del new_rule['after'] + new_rule['priority_class'] = priority_class + new_rule['user_name'] = user_name + + # check if the priority before/after is free + new_rule_priority = base_rule_priority + if after: + new_rule_priority -= 1 + else: + new_rule_priority += 1 + + new_rule['priority'] = new_rule_priority + + sql = ( + "SELECT COUNT(*) FROM "+PushRuleTable.table_name+ + " WHERE user_name = ? AND priority_class = ? AND priority = ?" + ) + txn.execute(sql, (user_name, priority_class, new_rule_priority)) + res = txn.fetchall() + num_conflicting = res[0][0] + + # if there are conflicting rules, bump everything + if num_conflicting: + sql = "UPDATE "+PushRuleTable.table_name+" SET priority = priority " + if after: + sql += "-1" + else: + sql += "+1" + sql += " WHERE user_name = ? AND priority_class = ? AND priority " + if after: + sql += "<= ?" + else: + sql += ">= ?" + + txn.execute(sql, (user_name, priority_class, new_rule_priority)) + + # now insert the new rule + sql = "INSERT OR REPLACE INTO "+PushRuleTable.table_name+" (" + sql += ",".join(new_rule.keys())+") VALUES (" + sql += ", ".join(["?" for _ in new_rule.keys()])+")" + + txn.execute(sql, new_rule.values()) + + def _add_push_rule_highest_priority_txn(self, txn, user_name, priority_class, **kwargs): + # find the highest priority rule in that class + sql = ( + "SELECT COUNT(*), MAX(priority) FROM "+PushRuleTable.table_name+ + " WHERE user_name = ? and priority_class = ?" + ) + txn.execute(sql, (user_name, priority_class)) + res = txn.fetchall() + (how_many, highest_prio) = res[0] + + new_prio = 0 + if how_many > 0: + new_prio = highest_prio + 1 + + # and insert the new rule + new_rule = copy.copy(kwargs) + if 'id' in new_rule: + del new_rule['id'] + new_rule['user_name'] = user_name + new_rule['priority_class'] = priority_class + new_rule['priority'] = new_prio + + sql = "INSERT OR REPLACE INTO "+PushRuleTable.table_name+" (" + sql += ",".join(new_rule.keys())+") VALUES (" + sql += ", ".join(["?" 
for _ in new_rule.keys()])+")" + + txn.execute(sql, new_rule.values()) + +class RuleNotFoundException(Exception): + pass + + +class InconsistentRuleException(Exception): + pass + + +class PushRuleTable(Table): + table_name = "push_rules" + + fields = [ + "id", + "user_name", + "rule_id", + "priority_class", + "priority", + "conditions", + "actions", + ] + + EntryType = collections.namedtuple("PushRuleEntry", fields) \ No newline at end of file -- cgit 1.5.1 From ede491b4e0c14d44ce43dd5b152abf148b54b9ed Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 22 Jan 2015 17:38:53 +0000 Subject: Oops: second part of commit dc938606 --- synapse/api/errors.py | 12 ++++++++++++ synapse/http/server.py | 8 ++------ synapse/rest/__init__.py | 3 ++- synapse/storage/__init__.py | 3 +++ synapse/storage/schema/delta/v10.sql | 13 +++++++++++++ synapse/storage/schema/pusher.sql | 13 +++++++++++++ 6 files changed, 45 insertions(+), 7 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/api/errors.py b/synapse/api/errors.py index a4155aebae..55181fe77e 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -21,6 +21,7 @@ logger = logging.getLogger(__name__) class Codes(object): + UNRECOGNIZED = "M_UNRECOGNIZED" UNAUTHORIZED = "M_UNAUTHORIZED" FORBIDDEN = "M_FORBIDDEN" BAD_JSON = "M_BAD_JSON" @@ -82,6 +83,17 @@ class RegistrationError(SynapseError): pass +class UnrecognizedRequestError(SynapseError): + """An error indicating we don't understand the request you're trying to make""" + def __init__(self, *args, **kwargs): + if "errcode" not in kwargs: + kwargs["errcode"] = Codes.NOT_FOUND + super(UnrecognizedRequestError, self).__init__( + 400, + "Unrecognized request", + **kwargs + ) + class AuthError(SynapseError): """An error raised when there was a problem authorising an event.""" diff --git a/synapse/http/server.py b/synapse/http/server.py index 8015a22edf..0f6539e1be 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -16,7 +16,7 @@ from synapse.http.agent_name import AGENT_NAME from synapse.api.errors import ( - cs_exception, SynapseError, CodeMessageException + cs_exception, SynapseError, CodeMessageException, UnrecognizedRequestError ) from synapse.util.logcontext import LoggingContext @@ -139,11 +139,7 @@ class JsonResource(HttpServer, resource.Resource): return # Huh. No one wanted to handle that? Fiiiiiine. Send 400. - self._send_response( - request, - 400, - {"error": "Unrecognized request"} - ) + raise UnrecognizedRequestError() except CodeMessageException as e: if isinstance(e, SynapseError): logger.info("%s SynapseError: %s - %s", request, e.code, e.msg) diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 59521d0c77..8e5877cf3f 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -16,7 +16,7 @@ from . 
import ( room, events, register, login, profile, presence, initial_sync, directory, - voip, admin, pusher, + voip, admin, pusher, push_rule ) @@ -46,3 +46,4 @@ class RestServletFactory(object): voip.register_servlets(hs, client_resource) admin.register_servlets(hs, client_resource) pusher.register_servlets(hs, client_resource) + push_rule.register_servlets(hs, client_resource) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 191fe462a5..11706676d0 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -30,6 +30,7 @@ from .transactions import TransactionStore from .keys import KeyStore from .event_federation import EventFederationStore from .pusher import PusherStore +from .push_rule import PushRuleStore from .media_repository import MediaRepositoryStore from .state import StateStore @@ -62,6 +63,7 @@ SCHEMAS = [ "event_edges", "event_signatures", "pusher", + "push_rules", "media_repository", ] @@ -85,6 +87,7 @@ class DataStore(RoomMemberStore, RoomStore, EventFederationStore, MediaRepositoryStore, PusherStore, + PushRuleStore ): def __init__(self, hs): diff --git a/synapse/storage/schema/delta/v10.sql b/synapse/storage/schema/delta/v10.sql index b84ce20ef3..8c4dfd5c1b 100644 --- a/synapse/storage/schema/delta/v10.sql +++ b/synapse/storage/schema/delta/v10.sql @@ -31,3 +31,16 @@ CREATE TABLE IF NOT EXISTS pushers ( FOREIGN KEY(user_name) REFERENCES users(name), UNIQUE (app_id, pushkey) ); + +CREATE TABLE IF NOT EXISTS push_rules ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + rule_id TEXT NOT NULL, + priority_class TINYINT NOT NULL, + priority INTEGER NOT NULL DEFAULT 0, + conditions TEXT NOT NULL, + actions TEXT NOT NULL, + UNIQUE(user_name, rule_id) +); + +CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name); diff --git a/synapse/storage/schema/pusher.sql b/synapse/storage/schema/pusher.sql index b84ce20ef3..8c4dfd5c1b 100644 --- a/synapse/storage/schema/pusher.sql +++ b/synapse/storage/schema/pusher.sql @@ -31,3 +31,16 @@ CREATE TABLE IF NOT EXISTS pushers ( FOREIGN KEY(user_name) REFERENCES users(name), UNIQUE (app_id, pushkey) ); + +CREATE TABLE IF NOT EXISTS push_rules ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + rule_id TEXT NOT NULL, + priority_class TINYINT NOT NULL, + priority INTEGER NOT NULL DEFAULT 0, + conditions TEXT NOT NULL, + actions TEXT NOT NULL, + UNIQUE(user_name, rule_id) +); + +CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name); -- cgit 1.5.1 From 7ecb49ef25937558f1a19a8fe47879d4b9116316 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 22 Jan 2015 17:53:30 +0000 Subject: Insufficient newlines --- synapse/storage/push_rule.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse/storage') diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 76c4557600..dbbb35b2ab 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -172,6 +172,7 @@ class PushRuleStore(SQLBaseStore): txn.execute(sql, new_rule.values()) + class RuleNotFoundException(Exception): pass -- cgit 1.5.1 From 673773b21701c91997512d568bcc8d49a5586b3a Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 22 Jan 2015 18:27:07 +0000 Subject: oops, this is not its own schema file --- synapse/storage/__init__.py | 1 - 1 file changed, 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 11706676d0..8f56d90d95 100644 --- 
a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -63,7 +63,6 @@ SCHEMAS = [
     "event_edges",
     "event_signatures",
     "pusher",
-    "push_rules",
     "media_repository",
 ]
-- cgit 1.5.1


From 8a850573c9cf50dd83ba47c033b28fe2bbbaf9d4 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 22 Jan 2015 19:32:17 +0000
Subject: As yet fairly untested GET API for push rules

---
 synapse/api/errors.py               |  14 +++-
 synapse/rest/client/v1/push_rule.py | 138 +++++++++++++++++++++++++++++++++---
 synapse/storage/push_rule.py        |   8 +--
 3 files changed, 145 insertions(+), 15 deletions(-)

(limited to 'synapse/storage')

diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 55181fe77e..01207282d6 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -87,13 +87,25 @@ class UnrecognizedRequestError(SynapseError):
     """An error indicating we don't understand the request you're trying to make"""
     def __init__(self, *args, **kwargs):
         if "errcode" not in kwargs:
-            kwargs["errcode"] = Codes.NOT_FOUND
+            kwargs["errcode"] = Codes.UNRECOGNIZED
         super(UnrecognizedRequestError, self).__init__(
             400,
             "Unrecognized request",
             **kwargs
         )

+
+class NotFoundError(SynapseError):
+    """An error indicating we can't find the thing you asked for"""
+    def __init__(self, *args, **kwargs):
+        if "errcode" not in kwargs:
+            kwargs["errcode"] = Codes.NOT_FOUND
+        super(NotFoundError, self).__init__(
+            404,
+            "Not found",
+            **kwargs
+        )
+
 class AuthError(SynapseError):
     """An error raised when there was a problem authorising an event."""

diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index b5e74479cf..2803c1f071 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -15,7 +15,7 @@

 from twisted.internet import defer

-from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError
+from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError, NotFoundError
 from base import RestServlet, client_path_pattern
 from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException

@@ -24,6 +24,14 @@ import json

 class PushRuleRestServlet(RestServlet):
     PATTERN = client_path_pattern("/pushrules/.*$")
+    PRIORITY_CLASS_MAP = {
+        'underride': 0,
+        'sender': 1,
+        'room': 2,
+        'content': 3,
+        'override': 4
+    }
+    PRIORITY_CLASS_INVERSE_MAP = {v: k for k, v in PRIORITY_CLASS_MAP.items()}

     def rule_spec_from_path(self, path):
         if len(path) < 2:
@@ -109,15 +117,7 @@ class PushRuleRestServlet(RestServlet):
         return (conditions, actions)

     def priority_class_from_spec(self, spec):
-        map = {
-            'underride': 0,
-            'sender': 1,
-            'room': 2,
-            'content': 3,
-            'override': 4
-        }
-
-        if spec['template'] not in map.keys():
+        if spec['template'] not in PushRuleRestServlet.PRIORITY_CLASS_MAP.keys():
             raise InvalidRuleException("Unknown template: %s" % (spec['template']))
-        pc = map[spec['template']]
+        pc = PushRuleRestServlet.PRIORITY_CLASS_MAP[spec['template']]

@@ -171,10 +171,128 @@ class PushRuleRestServlet(RestServlet):

         defer.returnValue((200, {}))

+    @defer.inlineCallbacks
+    def on_GET(self, request):
+        user = yield self.auth.get_user_by_req(request)
+
+        # we build up the full structure and then decide which bits of it
+        # to send which means doing unnecessary work sometimes but
+        # is probably not going to make a whole lot of difference
+        rawrules = yield self.hs.get_datastore().get_push_rules_for_user_name(user.to_string())
+
+        rules = {'global': {}, 'device': {}}
+
+        rules['global'] = _add_empty_priority_class_arrays(rules['global'])
+
+        for r in rawrules:
+            rulearray = None
+
+            r["conditions"]
= json.loads(r["conditions"]) + r["actions"] = json.loads(r["actions"]) + + template_name = _priority_class_to_template_name(r['priority_class']) + + if r['priority_class'] > PushRuleRestServlet.PRIORITY_CLASS_MAP['override']: + # per-device rule + instance_handle = _instance_handle_from_conditions(r["conditions"]) + if not instance_handle: + continue + if instance_handle not in rules['device']: + rules['device'][instance_handle] = [] + rules['device'][instance_handle] = \ + _add_empty_priority_class_arrays(rules['device'][instance_handle]) + + rulearray = rules['device'][instance_handle] + else: + rulearray = rules['global'][template_name] + + template_rule = _rule_to_template(r) + if template_rule: + rulearray.append(template_rule) + + path = request.postpath[1:] + if path == []: + defer.returnValue((200, rules)) + + if path[0] == 'global': + path = path[1:] + result = _filter_ruleset_with_path(rules['global'], path) + defer.returnValue((200, result)) + elif path[0] == 'device': + path = path[1:] + if path == []: + raise UnrecognizedRequestError + instance_handle = path[0] + if instance_handle not in rules['device']: + ret = {} + ret = _add_empty_priority_class_arrays(ret) + defer.returnValue((200, ret)) + ruleset = rules['device'][instance_handle] + result = _filter_ruleset_with_path(ruleset, path) + defer.returnValue((200, result)) + else: + raise UnrecognizedRequestError() + + def on_OPTIONS(self, _): return 200, {} +def _add_empty_priority_class_arrays(d): + for pc in PushRuleRestServlet.PRIORITY_CLASS_MAP.keys(): + d[pc] = [] + return d + +def _instance_handle_from_conditions(conditions): + """ + Given a list of conditions, return the instance handle of the + device rule if there is one + """ + for c in conditions: + if c['kind'] == 'device': + return c['instance_handle'] + return None + +def _filter_ruleset_with_path(ruleset, path): + if path == []: + return ruleset + template_kind = path[0] + if template_kind not in ruleset: + raise UnrecognizedRequestError() + path = path[1:] + if path == []: + return ruleset[template_kind] + rule_id = path[0] + for r in ruleset[template_kind]: + if r['rule_id'] == rule_id: + return r + raise NotFoundError + +def _priority_class_to_template_name(pc): + if pc > PushRuleRestServlet.PRIORITY_CLASS_MAP['override']: + # per-device + prio_class_index = pc - PushRuleRestServlet.PRIORITY_CLASS_MAP['override'] + return PushRuleRestServlet.PRIORITY_CLASS_INVERSE_MAP[prio_class_index] + else: + return PushRuleRestServlet.PRIORITY_CLASS_INVERSE_MAP[pc] + +def _rule_to_template(rule): + template_name = _priority_class_to_template_name(rule['priority_class']) + if template_name in ['override', 'underride']: + return {k:rule[k] for k in ["rule_id", "conditions", "actions"]} + elif template_name in ["sender", "room"]: + return {k:rule[k] for k in ["rule_id", "actions"]} + elif template_name == 'content': + if len(rule["conditions"]) != 1: + return None + thecond = rule["conditions"][0] + if "pattern" not in thecond: + return None + ret = {k:rule[k] for k in ["rule_id", "actions"]} + ret["pattern"] = thecond["pattern"] + return ret + + class InvalidRuleException(Exception): pass diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index dbbb35b2ab..d087257ffc 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -29,11 +29,11 @@ class PushRuleStore(SQLBaseStore): @defer.inlineCallbacks def get_push_rules_for_user_name(self, user_name): sql = ( - "SELECT "+",".join(PushRuleTable.fields)+ - "FROM pushers " - "WHERE 
user_name = ?" + "SELECT "+",".join(PushRuleTable.fields)+" " + "FROM "+PushRuleTable.table_name+" " + "WHERE user_name = ? " + "ORDER BY priority_class DESC, priority DESC" ) - rows = yield self._execute(None, sql, user_name) dicts = [] -- cgit 1.5.1 From bcd48b9636071543fa64e7fb066275d1c9c1e363 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 23 Jan 2015 10:28:25 +0000 Subject: Fix adding rules without before/after & add the rule that we couldn't find to the error --- synapse/rest/client/v1/push_rule.py | 4 ++-- synapse/storage/push_rule.py | 8 +++++--- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 7df3fc7f09..77a0772479 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -166,8 +166,8 @@ class PushRuleRestServlet(RestServlet): ) except InconsistentRuleException as e: raise SynapseError(400, e.message) - except RuleNotFoundException: - raise SynapseError(400, "before/after rule not found") + except RuleNotFoundException as e: + raise SynapseError(400, e.message) defer.returnValue((200, {})) diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index d087257ffc..2366090e09 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -46,7 +46,7 @@ class PushRuleStore(SQLBaseStore): defer.returnValue(dicts) @defer.inlineCallbacks - def add_push_rule(self, **kwargs): + def add_push_rule(self, before, after, **kwargs): vals = copy.copy(kwargs) if 'conditions' in vals: vals['conditions'] = json.dumps(vals['conditions']) @@ -57,10 +57,12 @@ class PushRuleStore(SQLBaseStore): if 'id' in vals: del vals['id'] - if 'after' in kwargs or 'before' in kwargs: + if before or after: ret = yield self.runInteraction( "_add_push_rule_relative_txn", self._add_push_rule_relative_txn, + before=before, + after=after, **vals ) defer.returnValue(ret) @@ -89,7 +91,7 @@ class PushRuleStore(SQLBaseStore): txn.execute(sql, (user_name, relative_to_rule)) res = txn.fetchall() if not res: - raise RuleNotFoundException() + raise RuleNotFoundException("before/after rule not found: %s" % (relative_to_rule)) (priority_class, base_rule_priority) = res[0] if 'priority_class' in kwargs and kwargs['priority_class'] != priority_class: -- cgit 1.5.1 From 5759bec43cb52862a8d455afb8cd9d1c5660bc3d Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 23 Jan 2015 11:47:15 +0000 Subject: Replace hs.parse_userid with UserID.from_string --- synapse/api/auth.py | 9 +++++---- synapse/handlers/_base.py | 5 +++-- synapse/handlers/events.py | 3 ++- synapse/handlers/federation.py | 13 +++++++------ synapse/handlers/message.py | 13 +++++++------ synapse/handlers/presence.py | 23 ++++++++++++----------- synapse/handlers/profile.py | 3 ++- synapse/handlers/room.py | 14 +++++++------- synapse/handlers/typing.py | 3 ++- synapse/rest/client/v1/admin.py | 4 +++- synapse/rest/client/v1/presence.py | 15 ++++++++------- synapse/rest/client/v1/profile.py | 13 +++++++------ synapse/rest/client/v1/room.py | 5 +++-- synapse/server.py | 6 ------ synapse/storage/roommember.py | 5 +++-- tests/handlers/test_presence.py | 29 +++++++++++++++-------------- tests/handlers/test_presencelike.py | 9 +++++---- tests/handlers/test_profile.py | 8 ++++---- tests/handlers/test_room.py | 9 +++++---- tests/handlers/test_typing.py | 7 ++++--- tests/rest/client/v1/test_presence.py | 19 ++++++++++--------- tests/rest/client/v1/test_profile.py | 3 ++- 
tests/rest/client/v1/test_rooms.py | 21 +++++++++------------ tests/rest/client/v1/test_typing.py | 5 +++-- tests/storage/test_presence.py | 5 +++-- tests/storage/test_profile.py | 3 ++- tests/storage/test_redaction.py | 5 +++-- tests/storage/test_room.py | 3 ++- tests/storage/test_roommember.py | 7 ++++--- tests/storage/test_stream.py | 5 +++-- tests/test_types.py | 6 ------ 31 files changed, 145 insertions(+), 133 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index e31482cfaa..a342a0e0da 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -21,6 +21,7 @@ from synapse.api.constants import EventTypes, Membership, JoinRules from synapse.api.errors import AuthError, StoreError, Codes, SynapseError from synapse.util.logutils import log_function from synapse.util.async import run_on_reactor +from synapse.types import UserID import logging @@ -104,7 +105,7 @@ class Auth(object): for event in curr_state: if event.type == EventTypes.Member: try: - if self.hs.parse_userid(event.state_key).domain != host: + if UserID.from_string(event.state_key).domain != host: continue except: logger.warn("state_key not user_id: %s", event.state_key) @@ -337,7 +338,7 @@ class Auth(object): user_info = { "admin": bool(ret.get("admin", False)), "device_id": ret.get("device_id"), - "user": self.hs.parse_userid(ret.get("name")), + "user": UserID.from_string(ret.get("name")), } defer.returnValue(user_info) @@ -461,7 +462,7 @@ class Auth(object): "You are not allowed to set others state" ) else: - sender_domain = self.hs.parse_userid( + sender_domain = UserID.from_string( event.user_id ).domain @@ -496,7 +497,7 @@ class Auth(object): # Validate users for k, v in user_list.items(): try: - self.hs.parse_userid(k) + UserID.from_string(k) except: raise SynapseError(400, "Not a valid user_id: %s" % (k,)) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index f33d17a31e..1773fa20aa 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -19,6 +19,7 @@ from synapse.api.errors import LimitExceededError, SynapseError from synapse.util.async import run_on_reactor from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.api.constants import Membership, EventTypes +from synapse.types import UserID import logging @@ -113,7 +114,7 @@ class BaseHandler(object): if event.type == EventTypes.Member: if event.content["membership"] == Membership.INVITE: - invitee = self.hs.parse_userid(event.state_key) + invitee = UserID.from_string(event.state_key) if not self.hs.is_mine(invitee): # TODO: Can we add signature from remote server in a nicer # way? 
If we have been invited by a remote server, we need @@ -134,7 +135,7 @@ class BaseHandler(object): if k[0] == EventTypes.Member: if s.content["membership"] == Membership.JOIN: destinations.add( - self.hs.parse_userid(s.state_key).domain + UserID.from_string(s.state_key).domain ) except SynapseError: logger.warn( diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 103bc67c42..01e67b0818 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -17,6 +17,7 @@ from twisted.internet import defer from synapse.util.logcontext import PreserveLoggingContext from synapse.util.logutils import log_function +from synapse.types import UserID from ._base import BaseHandler @@ -48,7 +49,7 @@ class EventStreamHandler(BaseHandler): @log_function def get_stream(self, auth_user_id, pagin_config, timeout=0, as_client_event=True): - auth_user = self.hs.parse_userid(auth_user_id) + auth_user = UserID.from_string(auth_user_id) try: if auth_user not in self._streams_per_user: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 81203bf1a3..bcdcc90a18 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -28,6 +28,7 @@ from synapse.crypto.event_signing import ( compute_event_signature, check_event_content_hash, add_hashes_and_signatures, ) +from synapse.types import UserID from syutil.jsonutil import encode_canonical_json from twisted.internet import defer @@ -227,7 +228,7 @@ class FederationHandler(BaseHandler): extra_users = [] if event.type == EventTypes.Member: target_user_id = event.state_key - target_user = self.hs.parse_userid(target_user_id) + target_user = UserID.from_string(target_user_id) extra_users.append(target_user) yield self.notifier.on_new_room_event( @@ -236,7 +237,7 @@ class FederationHandler(BaseHandler): if event.type == EventTypes.Member: if event.membership == Membership.JOIN: - user = self.hs.parse_userid(event.state_key) + user = UserID.from_string(event.state_key) yield self.distributor.fire( "user_joined_room", user=user, room_id=event.room_id ) @@ -491,7 +492,7 @@ class FederationHandler(BaseHandler): extra_users = [] if event.type == EventTypes.Member: target_user_id = event.state_key - target_user = self.hs.parse_userid(target_user_id) + target_user = UserID.from_string(target_user_id) extra_users.append(target_user) yield self.notifier.on_new_room_event( @@ -500,7 +501,7 @@ class FederationHandler(BaseHandler): if event.type == EventTypes.Member: if event.content["membership"] == Membership.JOIN: - user = self.hs.parse_userid(event.state_key) + user = UserID.from_string(event.state_key) yield self.distributor.fire( "user_joined_room", user=user, room_id=event.room_id ) @@ -514,7 +515,7 @@ class FederationHandler(BaseHandler): if k[0] == EventTypes.Member: if s.content["membership"] == Membership.JOIN: destinations.add( - self.hs.parse_userid(s.state_key).domain + UserID.from_string(s.state_key).domain ) except: logger.warn( @@ -565,7 +566,7 @@ class FederationHandler(BaseHandler): backfilled=False, ) - target_user = self.hs.parse_userid(event.state_key) + target_user = UserID.from_string(event.state_key) yield self.notifier.on_new_room_event( event, extra_users=[target_user], ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index f2a2f16933..6a1104a890 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -20,6 +20,7 @@ from synapse.api.errors import RoomError from synapse.streams.config import PaginationConfig from 
synapse.events.validator import EventValidator from synapse.util.logcontext import PreserveLoggingContext +from synapse.types import UserID from ._base import BaseHandler @@ -89,7 +90,7 @@ class MessageHandler(BaseHandler): yield self.hs.get_event_sources().get_current_token() ) - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) events, next_key = yield data_source.get_pagination_rows( user, pagin_config.get_source_config("room"), room_id @@ -130,13 +131,13 @@ class MessageHandler(BaseHandler): if ratelimit: self.ratelimit(builder.user_id) # TODO(paul): Why does 'event' not have a 'user' object? - user = self.hs.parse_userid(builder.user_id) + user = UserID.from_string(builder.user_id) assert self.hs.is_mine(user), "User must be our own: %s" % (user,) if builder.type == EventTypes.Member: membership = builder.content.get("membership", None) if membership == Membership.JOIN: - joinee = self.hs.parse_userid(builder.state_key) + joinee = UserID.from_string(builder.state_key) # If event doesn't include a display name, add one. yield self.distributor.fire( "collect_presencelike_data", @@ -237,7 +238,7 @@ class MessageHandler(BaseHandler): membership_list=[Membership.INVITE, Membership.JOIN] ) - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) rooms_ret = [] @@ -316,7 +317,7 @@ class MessageHandler(BaseHandler): # TODO(paul): I wish I was called with user objects not user_id # strings... - auth_user = self.hs.parse_userid(user_id) + auth_user = UserID.from_string(user_id) # TODO: These concurrently state_tuples = yield self.state_handler.get_current_state(room_id) @@ -349,7 +350,7 @@ class MessageHandler(BaseHandler): for m in room_members: try: member_presence = yield presence_handler.get_state( - target_user=self.hs.parse_userid(m.user_id), + target_user=UserID.from_string(m.user_id), auth_user=auth_user, as_event=True, ) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 8aeed99274..d66bfea7b1 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -20,6 +20,7 @@ from synapse.api.constants import PresenceState from synapse.util.logutils import log_function from synapse.util.logcontext import PreserveLoggingContext +from synapse.types import UserID from ._base import BaseHandler @@ -96,22 +97,22 @@ class PresenceHandler(BaseHandler): self.federation.register_edu_handler( "m.presence_invite", lambda origin, content: self.invite_presence( - observed_user=hs.parse_userid(content["observed_user"]), - observer_user=hs.parse_userid(content["observer_user"]), + observed_user=UserID.from_string(content["observed_user"]), + observer_user=UserID.from_string(content["observer_user"]), ) ) self.federation.register_edu_handler( "m.presence_accept", lambda origin, content: self.accept_presence( - observed_user=hs.parse_userid(content["observed_user"]), - observer_user=hs.parse_userid(content["observer_user"]), + observed_user=UserID.from_string(content["observed_user"]), + observer_user=UserID.from_string(content["observer_user"]), ) ) self.federation.register_edu_handler( "m.presence_deny", lambda origin, content: self.deny_presence( - observed_user=hs.parse_userid(content["observed_user"]), - observer_user=hs.parse_userid(content["observer_user"]), + observed_user=UserID.from_string(content["observed_user"]), + observer_user=UserID.from_string(content["observer_user"]), ) ) @@ -418,7 +419,7 @@ class PresenceHandler(BaseHandler): ) for p in presence: - observed_user = 
self.hs.parse_userid(p.pop("observed_user_id")) + observed_user = UserID.from_string(p.pop("observed_user_id")) p["observed_user"] = observed_user p.update(self._get_or_offline_usercache(observed_user).get_state()) if "last_active" in p: @@ -441,7 +442,7 @@ class PresenceHandler(BaseHandler): user.localpart, accepted=True ) target_users = set([ - self.hs.parse_userid(x["observed_user_id"]) for x in presence + UserID.from_string(x["observed_user_id"]) for x in presence ]) # Also include people in all my rooms @@ -646,7 +647,7 @@ class PresenceHandler(BaseHandler): deferreds = [] for push in content.get("push", []): - user = self.hs.parse_userid(push["user_id"]) + user = UserID.from_string(push["user_id"]) logger.debug("Incoming presence update from %s", user) @@ -694,7 +695,7 @@ class PresenceHandler(BaseHandler): del self._user_cachemap[user] for poll in content.get("poll", []): - user = self.hs.parse_userid(poll) + user = UserID.from_string(poll) if not self.hs.is_mine(user): continue @@ -709,7 +710,7 @@ class PresenceHandler(BaseHandler): deferreds.append(self._push_presence_remote(user, origin)) for unpoll in content.get("unpoll", []): - user = self.hs.parse_userid(unpoll) + user = UserID.from_string(unpoll) if not self.hs.is_mine(user): continue diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 7777d3cc94..03b2159c53 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError, AuthError, CodeMessageException from synapse.api.constants import EventTypes, Membership from synapse.util.logcontext import PreserveLoggingContext +from synapse.types import UserID from ._base import BaseHandler @@ -169,7 +170,7 @@ class ProfileHandler(BaseHandler): @defer.inlineCallbacks def on_profile_query(self, args): - user = self.hs.parse_userid(args["user_id"]) + user = UserID.from_string(args["user_id"]) if not self.hs.is_mine(user): raise SynapseError(400, "User is not hosted on this Home Server") diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 6d0db18e51..0242288c4e 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -64,7 +64,7 @@ class RoomCreationHandler(BaseHandler): invite_list = config.get("invite", []) for i in invite_list: try: - self.hs.parse_userid(i) + UserID.from_string(i) except: raise SynapseError(400, "Invalid user_id: %s" % (i,)) @@ -114,7 +114,7 @@ class RoomCreationHandler(BaseHandler): servers=[self.hs.hostname], ) - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) creation_events = self._create_events_for_new_room( user, room_id, is_public=is_public ) @@ -250,7 +250,7 @@ class RoomMemberHandler(BaseHandler): users = yield self.store.get_users_in_room(room_id) - defer.returnValue([hs.parse_userid(u) for u in users]) + defer.returnValue([UserID.from_string(u) for u in users]) @defer.inlineCallbacks def fetch_room_distributions_into(self, room_id, localusers=None, @@ -368,7 +368,7 @@ class RoomMemberHandler(BaseHandler): ) if prev_state and prev_state.membership == Membership.JOIN: - user = self.hs.parse_userid(event.user_id) + user = UserID.from_string(event.user_id) self.distributor.fire( "user_left_room", user=user, room_id=event.room_id ) @@ -412,7 +412,7 @@ class RoomMemberHandler(BaseHandler): @defer.inlineCallbacks def _do_join(self, event, context, room_host=None, do_auth=True): - joinee = self.hs.parse_userid(event.state_key) + joinee = UserID.from_string(event.state_key) # 
room_id = RoomID.from_string(event.room_id, self.hs) room_id = event.room_id @@ -476,7 +476,7 @@ class RoomMemberHandler(BaseHandler): do_auth=do_auth, ) - user = self.hs.parse_userid(event.user_id) + user = UserID.from_string(event.user_id) yield self.distributor.fire( "user_joined_room", user=user, room_id=room_id ) @@ -526,7 +526,7 @@ class RoomMemberHandler(BaseHandler): do_auth): yield run_on_reactor() - target_user = self.hs.parse_userid(event.state_key) + target_user = UserID.from_string(event.state_key) yield self.handle_new_client_event( event, diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index cd9638dd04..c69787005f 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -18,6 +18,7 @@ from twisted.internet import defer from ._base import BaseHandler from synapse.api.errors import SynapseError, AuthError +from synapse.types import UserID import logging @@ -185,7 +186,7 @@ class TypingNotificationHandler(BaseHandler): @defer.inlineCallbacks def _recv_edu(self, origin, content): room_id = content["room_id"] - user = self.homeserver.parse_userid(content["user_id"]) + user = UserID.from_string(content["user_id"]) localusers = set() diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index 0aa83514c8..4aefb94053 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -16,6 +16,8 @@ from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError +from synapse.types import UserID + from base import RestServlet, client_path_pattern import logging @@ -28,7 +30,7 @@ class WhoisRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): - target_user = self.hs.parse_userid(user_id) + target_user = UserID.from_string(user_id) auth_user = yield self.auth.get_user_by_req(request) is_admin = yield self.auth.is_server_admin(auth_user) diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index ca4d2d21f0..22fcb7d7d0 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -18,7 +18,8 @@ from twisted.internet import defer from synapse.api.errors import SynapseError -from base import RestServlet, client_path_pattern +from synapse.types import UserID +from .base import RestServlet, client_path_pattern import json import logging @@ -32,7 +33,7 @@ class PresenceStatusRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): auth_user = yield self.auth.get_user_by_req(request) - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) state = yield self.handlers.presence_handler.get_state( target_user=user, auth_user=auth_user) @@ -42,7 +43,7 @@ class PresenceStatusRestServlet(RestServlet): @defer.inlineCallbacks def on_PUT(self, request, user_id): auth_user = yield self.auth.get_user_by_req(request) - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) state = {} try: @@ -77,7 +78,7 @@ class PresenceListRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): auth_user = yield self.auth.get_user_by_req(request) - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) if not self.hs.is_mine(user): raise SynapseError(400, "User not hosted on this Home Server") @@ -97,7 +98,7 @@ class PresenceListRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request, user_id): auth_user = yield self.auth.get_user_by_req(request) - user = self.hs.parse_userid(user_id) + user 
= UserID.from_string(user_id) if not self.hs.is_mine(user): raise SynapseError(400, "User not hosted on this Home Server") @@ -118,7 +119,7 @@ class PresenceListRestServlet(RestServlet): raise SynapseError(400, "Bad invite value.") if len(u) == 0: continue - invited_user = self.hs.parse_userid(u) + invited_user = UserID.from_string(u) yield self.handlers.presence_handler.send_invite( observer_user=user, observed_user=invited_user ) @@ -129,7 +130,7 @@ class PresenceListRestServlet(RestServlet): raise SynapseError(400, "Bad drop value.") if len(u) == 0: continue - dropped_user = self.hs.parse_userid(u) + dropped_user = UserID.from_string(u) yield self.handlers.presence_handler.drop( observer_user=user, observed_user=dropped_user ) diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index dc6eb424b0..39297930c8 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -16,7 +16,8 @@ """ This module contains REST servlets to do with profile: /profile/ """ from twisted.internet import defer -from base import RestServlet, client_path_pattern +from .base import RestServlet, client_path_pattern +from synapse.types import UserID import json @@ -26,7 +27,7 @@ class ProfileDisplaynameRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) displayname = yield self.handlers.profile_handler.get_displayname( user, @@ -37,7 +38,7 @@ class ProfileDisplaynameRestServlet(RestServlet): @defer.inlineCallbacks def on_PUT(self, request, user_id): auth_user = yield self.auth.get_user_by_req(request) - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) try: content = json.loads(request.content.read()) @@ -59,7 +60,7 @@ class ProfileAvatarURLRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) avatar_url = yield self.handlers.profile_handler.get_avatar_url( user, @@ -70,7 +71,7 @@ class ProfileAvatarURLRestServlet(RestServlet): @defer.inlineCallbacks def on_PUT(self, request, user_id): auth_user = yield self.auth.get_user_by_req(request) - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) try: content = json.loads(request.content.read()) @@ -92,7 +93,7 @@ class ProfileRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) displayname = yield self.handlers.profile_handler.get_displayname( user, diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 48bba2a5f3..c5837b3403 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -20,6 +20,7 @@ from base import RestServlet, client_path_pattern from synapse.api.errors import SynapseError, Codes from synapse.streams.config import PaginationConfig from synapse.api.constants import EventTypes, Membership +from synapse.types import UserID import json import logging @@ -289,7 +290,7 @@ class RoomMemberListRestServlet(RestServlet): for event in members["chunk"]: # FIXME: should probably be state_key here, not user_id - target_user = self.hs.parse_userid(event["user_id"]) + target_user = UserID.from_string(event["user_id"]) # Presence is an optional cache; don't fail if we can't fetch it try: presence_handler = self.handlers.presence_handler @@ -478,7 +479,7 @@ class 
RoomTypingRestServlet(RestServlet): auth_user = yield self.auth.get_user_by_req(request) room_id = urllib.unquote(room_id) - target_user = self.hs.parse_userid(urllib.unquote(user_id)) + target_user = UserID.from_string(urllib.unquote(user_id)) content = _parse_json(request) diff --git a/synapse/server.py b/synapse/server.py index 476d809374..52a21aaf78 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -127,12 +127,6 @@ class BaseHomeServer(object): # TODO: Why are these parse_ methods so high up along with other globals? # Surely these should be in a util package or in the api package? - # Other utility methods - def parse_userid(self, s): - """Parse the string given by 's' as a User ID and return a UserID - object.""" - return UserID.from_string(s) - def parse_roomalias(self, s): """Parse the string given by 's' as a Room Alias and return a RoomAlias object.""" diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index e59e65529b..c69dd995ce 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -20,6 +20,7 @@ from collections import namedtuple from ._base import SQLBaseStore from synapse.api.constants import Membership +from synapse.types import UserID import logging @@ -39,7 +40,7 @@ class RoomMemberStore(SQLBaseStore): """ try: target_user_id = event.state_key - domain = self.hs.parse_userid(target_user_id).domain + domain = UserID.from_string(target_user_id).domain except: logger.exception( "Failed to parse target_user_id=%s", target_user_id @@ -84,7 +85,7 @@ class RoomMemberStore(SQLBaseStore): for e in member_events: try: joined_domains.add( - self.hs.parse_userid(e.state_key).domain + UserID.from_string(e.state_key).domain ) except: # FIXME: How do we deal with invalid user ids in the db? 
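The hunks above are all instances of one mechanical substitution: user ID strings are parsed directly with the UserID type instead of going through the helper that this commit removes from BaseHomeServer. A minimal sketch of the pattern, using a user ID that appears in the tests below (`hs` stands in for whatever HomeServer object is in scope at a given call site):

    from synapse.types import UserID

    # Before this commit: the HomeServer object did the parsing.
    user = hs.parse_userid("@frank:test")

    # After: parse the string directly; the resulting UserID is the same
    # object type, so nothing downstream changes.
    user = UserID.from_string("@frank:test")

    user.localpart    # "frank"
    user.domain       # "test"
    hs.is_mine(user)  # still available where a HomeServer is in scope

The remainder of the commit, below, applies the same substitution across the tests.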
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 56e90177f1..5621a8afaf 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -17,7 +17,7 @@ from tests import unittest from twisted.internet import defer, reactor -from mock import Mock, call, ANY, NonCallableMock, patch +from mock import Mock, call, ANY, NonCallableMock import json from tests.utils import ( @@ -31,6 +31,7 @@ from synapse.api.errors import SynapseError from synapse.handlers.presence import PresenceHandler, UserPresenceCache from synapse.streams.config import SourcePaginationConfig from synapse.storage.transactions import DestinationsTable +from synapse.types import UserID OFFLINE = PresenceState.OFFLINE UNAVAILABLE = PresenceState.UNAVAILABLE @@ -170,9 +171,9 @@ class PresenceTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp_users(self, hs): # Some local users to test with - self.u_apple = hs.parse_userid("@apple:test") - self.u_banana = hs.parse_userid("@banana:test") - self.u_clementine = hs.parse_userid("@clementine:test") + self.u_apple = UserID.from_string("@apple:test") + self.u_banana = UserID.from_string("@banana:test") + self.u_clementine = UserID.from_string("@clementine:test") for u in self.u_apple, self.u_banana, self.u_clementine: yield self.datastore.create_presence(u.localpart) @@ -182,10 +183,10 @@ class PresenceTestCase(unittest.TestCase): ) # ID of a local user that does not exist - self.u_durian = hs.parse_userid("@durian:test") + self.u_durian = UserID.from_string("@durian:test") # A remote user - self.u_cabbage = hs.parse_userid("@cabbage:elsewhere") + self.u_cabbage = UserID.from_string("@cabbage:elsewhere") class MockedDatastorePresenceTestCase(PresenceTestCase): @@ -250,16 +251,16 @@ class MockedDatastorePresenceTestCase(PresenceTestCase): @defer.inlineCallbacks def setUp_users(self, hs): # Some local users to test with - self.u_apple = hs.parse_userid("@apple:test") - self.u_banana = hs.parse_userid("@banana:test") - self.u_clementine = hs.parse_userid("@clementine:test") - self.u_durian = hs.parse_userid("@durian:test") - self.u_elderberry = hs.parse_userid("@elderberry:test") - self.u_fig = hs.parse_userid("@fig:test") + self.u_apple = UserID.from_string("@apple:test") + self.u_banana = UserID.from_string("@banana:test") + self.u_clementine = UserID.from_string("@clementine:test") + self.u_durian = UserID.from_string("@durian:test") + self.u_elderberry = UserID.from_string("@elderberry:test") + self.u_fig = UserID.from_string("@fig:test") # Remote user - self.u_onion = hs.parse_userid("@onion:farm") - self.u_potato = hs.parse_userid("@potato:remote") + self.u_onion = UserID.from_string("@onion:farm") + self.u_potato = UserID.from_string("@potato:remote") yield diff --git a/tests/handlers/test_presencelike.py b/tests/handlers/test_presencelike.py index 0584e4c8b9..3cdbb186ae 100644 --- a/tests/handlers/test_presencelike.py +++ b/tests/handlers/test_presencelike.py @@ -27,6 +27,7 @@ from synapse.server import HomeServer from synapse.api.constants import PresenceState from synapse.handlers.presence import PresenceHandler from synapse.handlers.profile import ProfileHandler +from synapse.types import UserID OFFLINE = PresenceState.OFFLINE @@ -136,12 +137,12 @@ class PresenceProfilelikeDataTestCase(unittest.TestCase): lambda u: defer.succeed([])) # Some local users to test with - self.u_apple = hs.parse_userid("@apple:test") - self.u_banana = hs.parse_userid("@banana:test") - self.u_clementine = 
hs.parse_userid("@clementine:test") + self.u_apple = UserID.from_string("@apple:test") + self.u_banana = UserID.from_string("@banana:test") + self.u_clementine = UserID.from_string("@clementine:test") # Remote user - self.u_potato = hs.parse_userid("@potato:remote") + self.u_potato = UserID.from_string("@potato:remote") self.mock_get_joined = ( self.datastore.get_rooms_for_user_where_membership_is diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 25b172aa5e..7b9590c110 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -22,7 +22,7 @@ from mock import Mock, NonCallableMock from synapse.api.errors import AuthError from synapse.server import HomeServer from synapse.handlers.profile import ProfileHandler -from synapse.api.constants import Membership +from synapse.types import UserID from tests.utils import SQLiteMemoryDbPool, MockKey @@ -71,9 +71,9 @@ class ProfileTestCase(unittest.TestCase): self.store = hs.get_datastore() - self.frank = hs.parse_userid("@1234ABCD:test") - self.bob = hs.parse_userid("@4567:test") - self.alice = hs.parse_userid("@alice:remote") + self.frank = UserID.from_string("@1234ABCD:test") + self.bob = UserID.from_string("@4567:test") + self.alice = UserID.from_string("@alice:remote") yield self.store.create_profile(self.frank.localpart) diff --git a/tests/handlers/test_room.py b/tests/handlers/test_room.py index d3253b48b8..9a23b3812d 100644 --- a/tests/handlers/test_room.py +++ b/tests/handlers/test_room.py @@ -15,12 +15,13 @@ from twisted.internet import defer -from tests import unittest +from .. import unittest from synapse.api.constants import EventTypes, Membership from synapse.handlers.room import RoomMemberHandler, RoomCreationHandler from synapse.handlers.profile import ProfileHandler from synapse.server import HomeServer +from synapse.types import UserID from ..utils import MockKey from mock import Mock, NonCallableMock @@ -164,7 +165,7 @@ class RoomMemberHandlerTestCase(unittest.TestCase): event, context=context, ) self.notifier.on_new_room_event.assert_called_once_with( - event, extra_users=[self.hs.parse_userid(target_user_id)] + event, extra_users=[UserID.from_string(target_user_id)] ) self.assertFalse(self.datastore.get_room.called) self.assertFalse(self.datastore.store_room.called) @@ -174,7 +175,7 @@ class RoomMemberHandlerTestCase(unittest.TestCase): def test_simple_join(self): room_id = "!foo:red" user_id = "@bob:red" - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) join_signal_observer = Mock() self.distributor.observe("user_joined_room", join_signal_observer) @@ -252,7 +253,7 @@ class RoomMemberHandlerTestCase(unittest.TestCase): def test_simple_leave(self): room_id = "!foo:red" user_id = "@bob:red" - user = self.hs.parse_userid(user_id) + user = UserID.from_string(user_id) builder = self.hs.get_event_builder_factory().new({ "type": EventTypes.Member, diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 6a498b23a4..8a7fc028d1 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -27,6 +27,7 @@ from synapse.server import HomeServer from synapse.handlers.typing import TypingNotificationHandler from synapse.storage.transactions import DestinationsTable +from synapse.types import UserID def _expect_edu(destination, edu_type, content, origin="test"): @@ -153,11 +154,11 @@ class TypingNotificationsTestCase(unittest.TestCase): self.auth.check_joined_room = check_joined_room # Some local users to test with - 
self.u_apple = hs.parse_userid("@apple:test") - self.u_banana = hs.parse_userid("@banana:test") + self.u_apple = UserID.from_string("@apple:test") + self.u_banana = UserID.from_string("@banana:test") # Remote user - self.u_onion = hs.parse_userid("@onion:farm") + self.u_onion = UserID.from_string("@onion:farm") @defer.inlineCallbacks def test_started_typing_local(self): diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py index 783720ac29..65d5cc4916 100644 --- a/tests/rest/client/v1/test_presence.py +++ b/tests/rest/client/v1/test_presence.py @@ -27,6 +27,7 @@ from synapse.handlers.presence import PresenceHandler from synapse.server import HomeServer from synapse.rest.client.v1 import presence from synapse.rest.client.v1 import events +from synapse.types import UserID OFFLINE = PresenceState.OFFLINE @@ -71,7 +72,7 @@ class PresenceStateTestCase(unittest.TestCase): def _get_user_by_token(token=None): return { - "user": hs.parse_userid(myid), + "user": UserID.from_string(myid), "admin": False, "device_id": None, } @@ -90,7 +91,7 @@ class PresenceStateTestCase(unittest.TestCase): presence.register_servlets(hs, self.mock_resource) - self.u_apple = hs.parse_userid(myid) + self.u_apple = UserID.from_string(myid) @defer.inlineCallbacks def test_get_my_status(self): @@ -161,12 +162,12 @@ class PresenceListTestCase(unittest.TestCase): def _get_user_by_token(token=None): return { - "user": hs.parse_userid(myid), + "user": UserID.from_string(myid), "admin": False, "device_id": None, } - room_member_handler = hs.handlers.room_member_handler = Mock( + hs.handlers.room_member_handler = Mock( spec=[ "get_rooms_for_user", ] @@ -176,8 +177,8 @@ class PresenceListTestCase(unittest.TestCase): presence.register_servlets(hs, self.mock_resource) - self.u_apple = hs.parse_userid("@apple:test") - self.u_banana = hs.parse_userid("@banana:test") + self.u_apple = UserID.from_string("@apple:test") + self.u_banana = UserID.from_string("@banana:test") @defer.inlineCallbacks def test_get_my_list(self): @@ -281,7 +282,7 @@ class PresenceEventStreamTestCase(unittest.TestCase): hs.get_clock().time_msec.return_value = 1000000 def _get_user_by_req(req=None): - return hs.parse_userid(myid) + return UserID.from_string(myid) hs.get_auth().get_user_by_req = _get_user_by_req @@ -322,8 +323,8 @@ class PresenceEventStreamTestCase(unittest.TestCase): self.presence = hs.get_handlers().presence_handler - self.u_apple = hs.parse_userid("@apple:test") - self.u_banana = hs.parse_userid("@banana:test") + self.u_apple = UserID.from_string("@apple:test") + self.u_banana = UserID.from_string("@banana:test") @defer.inlineCallbacks def test_shortpoll(self): diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index 5b5c3edc22..39cd68d829 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -24,6 +24,7 @@ from ....utils import MockHttpResource, MockKey from synapse.api.errors import SynapseError, AuthError from synapse.server import HomeServer +from synapse.types import UserID from synapse.rest.client.v1 import profile @@ -57,7 +58,7 @@ class ProfileTestCase(unittest.TestCase): ) def _get_user_by_req(request=None): - return hs.parse_userid(myid) + return UserID.from_string(myid) hs.get_auth().get_user_by_req = _get_user_by_req diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 12f8040541..76ed550b75 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ 
-22,13 +22,10 @@ import synapse.rest.client.v1.room from synapse.api.constants import Membership from synapse.server import HomeServer +from synapse.types import UserID -from tests import unittest - -# python imports import json import urllib -import types from ....utils import MockHttpResource, SQLiteMemoryDbPool, MockKey from .utils import RestTestCase @@ -70,7 +67,7 @@ class RoomPermissionsTestCase(RestTestCase): def _get_user_by_token(token=None): return { - "user": hs.parse_userid(self.auth_user_id), + "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, } @@ -466,7 +463,7 @@ class RoomsMemberListTestCase(RestTestCase): def _get_user_by_token(token=None): return { - "user": hs.parse_userid(self.auth_user_id), + "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, } @@ -555,7 +552,7 @@ class RoomsCreateTestCase(RestTestCase): def _get_user_by_token(token=None): return { - "user": hs.parse_userid(self.auth_user_id), + "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, } @@ -657,7 +654,7 @@ class RoomTopicTestCase(RestTestCase): def _get_user_by_token(token=None): return { - "user": hs.parse_userid(self.auth_user_id), + "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, } @@ -773,7 +770,7 @@ class RoomMemberStateTestCase(RestTestCase): def _get_user_by_token(token=None): return { - "user": hs.parse_userid(self.auth_user_id), + "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, } @@ -909,7 +906,7 @@ class RoomMessagesTestCase(RestTestCase): def _get_user_by_token(token=None): return { - "user": hs.parse_userid(self.auth_user_id), + "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, } @@ -1013,7 +1010,7 @@ class RoomInitialSyncTestCase(RestTestCase): def _get_user_by_token(token=None): return { - "user": hs.parse_userid(self.auth_user_id), + "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, } @@ -1028,7 +1025,7 @@ class RoomInitialSyncTestCase(RestTestCase): # Since I'm getting my own presence I need to exist as far as presence # is concerned. 
hs.get_handlers().presence_handler.registered_user( - hs.parse_userid(self.user_id) + UserID.from_string(self.user_id) ) # create the room diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py index 647bcebfd8..c89b37d004 100644 --- a/tests/rest/client/v1/test_typing.py +++ b/tests/rest/client/v1/test_typing.py @@ -20,6 +20,7 @@ from twisted.internet import defer import synapse.rest.client.v1.room from synapse.server import HomeServer +from synapse.types import UserID from ....utils import MockHttpResource, MockClock, SQLiteMemoryDbPool, MockKey from .utils import RestTestCase @@ -69,7 +70,7 @@ class RoomTypingTestCase(RestTestCase): def _get_user_by_token(token=None): return { - "user": hs.parse_userid(self.auth_user_id), + "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, } @@ -82,7 +83,7 @@ class RoomTypingTestCase(RestTestCase): def get_room_members(room_id): if room_id == self.room_id: - return defer.succeed([hs.parse_userid(self.user_id)]) + return defer.succeed([UserID.from_string(self.user_id)]) else: return defer.succeed([]) diff --git a/tests/storage/test_presence.py b/tests/storage/test_presence.py index 9655d3cf42..1ab193736b 100644 --- a/tests/storage/test_presence.py +++ b/tests/storage/test_presence.py @@ -19,6 +19,7 @@ from twisted.internet import defer from synapse.server import HomeServer from synapse.storage.presence import PresenceStore +from synapse.types import UserID from tests.utils import SQLiteMemoryDbPool, MockClock @@ -37,8 +38,8 @@ class PresenceStoreTestCase(unittest.TestCase): self.store = PresenceStore(hs) - self.u_apple = hs.parse_userid("@apple:test") - self.u_banana = hs.parse_userid("@banana:test") + self.u_apple = UserID.from_string("@apple:test") + self.u_banana = UserID.from_string("@banana:test") @defer.inlineCallbacks def test_state(self): diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index 5d36723c28..84381241bc 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -19,6 +19,7 @@ from twisted.internet import defer from synapse.server import HomeServer from synapse.storage.profile import ProfileStore +from synapse.types import UserID from tests.utils import SQLiteMemoryDbPool @@ -36,7 +37,7 @@ class ProfileStoreTestCase(unittest.TestCase): self.store = ProfileStore(hs) - self.u_frank = hs.parse_userid("@frank:test") + self.u_frank = UserID.from_string("@frank:test") @defer.inlineCallbacks def test_displayname(self): diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 9806fbc69b..a16ccad881 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -19,6 +19,7 @@ from twisted.internet import defer from synapse.server import HomeServer from synapse.api.constants import EventTypes, Membership +from synapse.types import UserID from tests.utils import SQLiteMemoryDbPool, MockKey @@ -48,8 +49,8 @@ class RedactionTestCase(unittest.TestCase): self.handlers = hs.get_handlers() self.message_handler = self.handlers.message_handler - self.u_alice = hs.parse_userid("@alice:test") - self.u_bob = hs.parse_userid("@bob:test") + self.u_alice = UserID.from_string("@alice:test") + self.u_bob = UserID.from_string("@bob:test") self.room1 = hs.parse_roomid("!abc123:test") diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py index e7739776ec..c6bfde069a 100644 --- a/tests/storage/test_room.py +++ b/tests/storage/test_room.py @@ -19,6 +19,7 @@ from twisted.internet import defer 
from synapse.server import HomeServer from synapse.api.constants import EventTypes +from synapse.types import UserID from tests.utils import SQLiteMemoryDbPool @@ -40,7 +41,7 @@ class RoomStoreTestCase(unittest.TestCase): self.room = hs.parse_roomid("!abcde:test") self.alias = hs.parse_roomalias("#a-room-name:test") - self.u_creator = hs.parse_userid("@creator:test") + self.u_creator = UserID.from_string("@creator:test") yield self.store.store_room(self.room.to_string(), room_creator_user_id=self.u_creator.to_string(), diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index a23a8189df..6b7930b1d8 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -19,6 +19,7 @@ from twisted.internet import defer from synapse.server import HomeServer from synapse.api.constants import EventTypes, Membership +from synapse.types import UserID from tests.utils import SQLiteMemoryDbPool, MockKey @@ -49,11 +50,11 @@ class RoomMemberStoreTestCase(unittest.TestCase): self.handlers = hs.get_handlers() self.message_handler = self.handlers.message_handler - self.u_alice = hs.parse_userid("@alice:test") - self.u_bob = hs.parse_userid("@bob:test") + self.u_alice = UserID.from_string("@alice:test") + self.u_bob = UserID.from_string("@bob:test") # User elsewhere on another host - self.u_charlie = hs.parse_userid("@charlie:elsewhere") + self.u_charlie = UserID.from_string("@charlie:elsewhere") self.room = hs.parse_roomid("!abc123:test") diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index 9247fc579e..d7c7f64d5e 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -19,6 +19,7 @@ from twisted.internet import defer from synapse.server import HomeServer from synapse.api.constants import EventTypes, Membership +from synapse.types import UserID from tests.utils import SQLiteMemoryDbPool, MockKey @@ -48,8 +49,8 @@ class StreamStoreTestCase(unittest.TestCase): self.handlers = hs.get_handlers() self.message_handler = self.handlers.message_handler - self.u_alice = hs.parse_userid("@alice:test") - self.u_bob = hs.parse_userid("@bob:test") + self.u_alice = UserID.from_string("@alice:test") + self.u_bob = UserID.from_string("@bob:test") self.room1 = hs.parse_roomid("!abc123:test") self.room2 = hs.parse_roomid("!xyx987:test") diff --git a/tests/test_types.py b/tests/test_types.py index bfb9e6f548..2de7f22ab0 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -42,12 +42,6 @@ class UserIDTestCase(unittest.TestCase): self.assertTrue(userA == userAagain) self.assertTrue(userA != userB) - def test_via_homeserver(self): - user = mock_homeserver.parse_userid("@3456ijkl:my.domain") - - self.assertEquals("3456ijkl", user.localpart) - self.assertEquals("my.domain", user.domain) - class RoomAliasTestCase(unittest.TestCase): -- cgit 1.5.1 From 3b9cc882a50c886afd7d2cf1eaa7e02e8b0d0d51 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 23 Jan 2015 15:42:52 +0000 Subject: Add storage method have_events --- synapse/storage/__init__.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 015fcc8775..4f09909607 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -422,6 +422,35 @@ class DataStore(RoomMemberStore, RoomStore, ], ) + def have_events(self, event_ids): + """Given a list of event ids, check if we have already processed them. 
+
+        Returns:
+            dict: Has an entry for each event id we already have seen. Maps to
+            the rejected reason string if we rejected the event, else maps to
+            None.
+        """
+        def f(txn):
+            sql = (
+                "SELECT e.event_id, reason FROM events as e "
+                "LEFT JOIN rejections as r ON e.event_id = r.event_id "
+                "WHERE e.event_id = ?"
+            )
+
+            res = {}
+            for event_id in event_ids:
+                txn.execute(sql, (event_id,))
+                row = txn.fetchone()
+                if row:
+                    _, rejected = row
+                    res[event_id] = rejected
+
+            return res
+
+        return self.runInteraction(
+            "have_events", f,
+        )
+
 
 def schema_path(schema):
     """ Get a filesystem path for the named database schema
-- cgit 1.5.1


From 5f84ba8ea1991dff279f0135f474d9debfd1419a Mon Sep 17 00:00:00 2001
From: David Baker
Date: Fri, 23 Jan 2015 17:49:37 +0000
Subject: Add API to delete push rules.

---
 synapse/rest/client/v1/push_rule.py | 41 ++++++++++++++++++++++++++++++++++++-
 synapse/storage/push_rule.py        |  9 ++++++++
 2 files changed, 49 insertions(+), 1 deletion(-)

(limited to 'synapse/storage')

diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index 9dc2c0e11e..50bf5b9008 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -15,7 +15,8 @@
 
 from twisted.internet import defer
 
-from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError, NotFoundError
+from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError, NotFoundError, \
+    StoreError
 from base import RestServlet, client_path_pattern
 from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException
 
@@ -175,6 +176,43 @@ class PushRuleRestServlet(RestServlet):
 
         defer.returnValue((200, {}))
 
+    @defer.inlineCallbacks
+    def on_DELETE(self, request):
+        spec = self.rule_spec_from_path(request.postpath)
+        try:
+            priority_class = _priority_class_from_spec(spec)
+        except InvalidRuleException as e:
+            raise SynapseError(400, e.message)
+
+        user = yield self.auth.get_user_by_req(request)
+
+        if 'device' in spec:
+            rules = yield self.hs.get_datastore().get_push_rules_for_user_name(
+                user.to_string()
+            )
+
+            for r in rules:
+                conditions = json.loads(r['conditions'])
+                ih = _instance_handle_from_conditions(conditions)
+                if ih == spec['device'] and r['priority_class'] == priority_class:
+                    yield self.hs.get_datastore().delete_push_rule(
+                        user.to_string(), spec['rule_id']
+                    )
+                    defer.returnValue((200, {}))
+            raise NotFoundError()
+        else:
+            try:
+                yield self.hs.get_datastore().delete_push_rule(
+                    user.to_string(), spec['rule_id']
+                )
+                defer.returnValue((200, {}))
+            except StoreError as e:
+                if e.code == 404:
+                    raise NotFoundError()
+                else:
+                    raise
+
     @defer.inlineCallbacks
     def on_GET(self, request):
         user = yield self.auth.get_user_by_req(request)
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index 2366090e09..ca04f2ccee 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -174,6 +174,17 @@ class PushRuleStore(SQLBaseStore):
 
             txn.execute(sql, new_rule.values())
 
+    @defer.inlineCallbacks
+    def delete_push_rule(self, user_name, rule_id):
+        """Delete the rule with the given rule_id for the given user.
+        rule_ids are unique per user, so no priority class is required.
+        """
+        yield self._simple_delete_one(
+            PushRuleTable.table_name,
+            {
+                'user_name': user_name,
+                'rule_id': rule_id
+            }
+        )
 
 class RuleNotFoundException(Exception):
     pass
-- cgit 1.5.1


From 7b8861924130821c1bbd05ce65260209a993f759 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 26 Jan 2015 10:45:24 +0000
Subject: Split up replication_layer module into client, server and transaction queue

---
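The diffstat below is dominated by the two new modules, but the key design point is that ReplicationLayer keeps its existing public surface by multiply inheriting from the new halves. FederationClient and FederationServer are written as mixins: they read shared state (server name, transport layer, clock, store, handler registries) off `self`, and the server half even calls client methods such as get_pdu when fetching missing PDUs, which only works in the combined class. A rough sketch of that composition follows; the `__init__` body is an inference from the attributes the two halves reference, not the commit's actual constructor:

    from synapse.federation.federation_client import FederationClient
    from synapse.federation.federation_server import FederationServer

    class ReplicationLayer(FederationClient, FederationServer):
        def __init__(self, hs, transport_layer):
            # Shared state that both mixins expect to find on self.
            # (Illustrative only: the real constructor is not in this diff.)
            self.server_name = hs.hostname
            self.transport_layer = transport_layer
            self.store = hs.get_datastore()
            self._clock = hs.get_clock()
            self.edu_handlers = {}
            self.query_handlers = {}
            self._order = 0
            # Also expected: self.handler (set via set_handler()),
            # self.transaction_actions and self._transaction_queue.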
synapse/federation/federation_client.py | 293 +++++++++++++++ synapse/federation/federation_server.py | 345 ++++++++++++++++++ synapse/federation/replication.py | 608 +------------------------------- synapse/federation/transaction_queue.py | 9 +- synapse/storage/__init__.py | 2 +- 5 files changed, 654 insertions(+), 603 deletions(-) create mode 100644 synapse/federation/federation_client.py create mode 100644 synapse/federation/federation_server.py (limited to 'synapse/storage') diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py new file mode 100644 index 0000000000..c80f4c61bc --- /dev/null +++ b/synapse/federation/federation_client.py @@ -0,0 +1,293 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from twisted.internet import defer + +from .units import Edu + +from synapse.util.logutils import log_function +from synapse.events import FrozenEvent + +import logging + + +logger = logging.getLogger(__name__) + + +class FederationClient(object): + @log_function + def send_pdu(self, pdu, destinations): + """Informs the replication layer about a new PDU generated within the + home server that should be transmitted to others. + + TODO: Figure out when we should actually resolve the deferred. + + Args: + pdu (Pdu): The new Pdu. + + Returns: + Deferred: Completes when we have successfully processed the PDU + and replicated it to any interested remote home servers. + """ + order = self._order + self._order += 1 + + logger.debug("[%s] transaction_layer.enqueue_pdu... ", pdu.event_id) + + # TODO, add errback, etc. + self._transaction_queue.enqueue_pdu(pdu, destinations, order) + + logger.debug( + "[%s] transaction_layer.enqueue_pdu... done", + pdu.event_id + ) + + @log_function + def send_edu(self, destination, edu_type, content): + edu = Edu( + origin=self.server_name, + destination=destination, + edu_type=edu_type, + content=content, + ) + + # TODO, add errback, etc. + self._transaction_queue.enqueue_edu(edu) + return defer.succeed(None) + + @log_function + def send_failure(self, failure, destination): + self._transaction_queue.enqueue_failure(failure, destination) + return defer.succeed(None) + + @log_function + def make_query(self, destination, query_type, args, + retry_on_dns_fail=True): + """Sends a federation Query to a remote homeserver of the given type + and arguments. + + Args: + destination (str): Domain name of the remote homeserver + query_type (str): Category of the query type; should match the + handler name used in register_query_handler(). + args (dict): Mapping of strings to strings containing the details + of the query request. 
+
+        Returns:
+            a Deferred which will eventually yield a JSON object from the
+            response
+        """
+        return self.transport_layer.make_query(
+            destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail
+        )
+
+    @defer.inlineCallbacks
+    @log_function
+    def backfill(self, dest, context, limit, extremities):
+        """Requests some more historic PDUs for the given context from the
+        given destination server.
+
+        Args:
+            dest (str): The remote home server to ask.
+            context (str): The context to backfill.
+            limit (int): The maximum number of PDUs to return.
+            extremities (list): List of PDU id and origins of the first pdus
+                we have seen from the context
+
+        Returns:
+            Deferred: Results in the received PDUs.
+        """
+        logger.debug("backfill extrem=%s", extremities)
+
+        # If there are no extremities then we've (probably) reached the start.
+        if not extremities:
+            return
+
+        transaction_data = yield self.transport_layer.backfill(
+            dest, context, extremities, limit)
+
+        logger.debug("backfill transaction_data=%s", repr(transaction_data))
+
+        pdus = [
+            self.event_from_pdu_json(p, outlier=False)
+            for p in transaction_data["pdus"]
+        ]
+
+        defer.returnValue(pdus)
+
+    @defer.inlineCallbacks
+    @log_function
+    def get_pdu(self, destinations, event_id, outlier=False):
+        """Requests the PDU with given origin and ID from the remote home
+        servers.
+
+        Will attempt to get the PDU from each destination in the list until
+        one succeeds.
+
+        This will persist the PDU locally upon receipt.
+
+        Args:
+            destinations (list): Which home servers to query
+            event_id (str): The ID of the PDU being requested.
+            outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
+                it's from an arbitrary point in the context as opposed to part
+                of the current block of PDUs. Defaults to `False`
+
+        Returns:
+            Deferred: Results in the requested PDU.
+        """
+
+        # TODO: Rate limit the number of times we try and get the same event.
+
+        pdu = None
+        for destination in destinations:
+            try:
+                transaction_data = yield self.transport_layer.get_event(
+                    destination, event_id
+                )
+            except Exception as e:
+                logger.info(
+                    "Failed to get PDU %s from %s because %s",
+                    event_id, destination, e,
+                )
+                continue
+
+            logger.debug("transaction_data %r", transaction_data)
+
+            pdu_list = [
+                self.event_from_pdu_json(p, outlier=outlier)
+                for p in transaction_data["pdus"]
+            ]
+
+            if pdu_list:
+                pdu = pdu_list[0]
+                # TODO: We need to check signatures here
+                break
+
+        defer.returnValue(pdu)
+
+    @defer.inlineCallbacks
+    @log_function
+    def get_state_for_room(self, destination, room_id, event_id):
+        """Requests all of the `current` state PDUs for a given room from
+        a remote home server.
+
+        Args:
+            destination (str): The remote homeserver to query for the state.
+            room_id (str): The id of the room we're interested in.
+            event_id (str): The id of the event we want the state at.
+
+        Returns:
+            Deferred: Results in a list of PDUs.
+ """ + + result = yield self.transport_layer.get_room_state( + destination, room_id, event_id=event_id, + ) + + pdus = [ + self.event_from_pdu_json(p, outlier=True) for p in result["pdus"] + ] + + auth_chain = [ + self.event_from_pdu_json(p, outlier=True) + for p in result.get("auth_chain", []) + ] + + defer.returnValue((pdus, auth_chain)) + + @defer.inlineCallbacks + @log_function + def get_event_auth(self, destination, room_id, event_id): + res = yield self.transport_layer.get_event_auth( + destination, room_id, event_id, + ) + + auth_chain = [ + self.event_from_pdu_json(p, outlier=True) + for p in res["auth_chain"] + ] + + auth_chain.sort(key=lambda e: e.depth) + + defer.returnValue(auth_chain) + + @defer.inlineCallbacks + def make_join(self, destination, room_id, user_id): + ret = yield self.transport_layer.make_join( + destination, room_id, user_id + ) + + pdu_dict = ret["event"] + + logger.debug("Got response to make_join: %s", pdu_dict) + + defer.returnValue(self.event_from_pdu_json(pdu_dict)) + + @defer.inlineCallbacks + def send_join(self, destination, pdu): + time_now = self._clock.time_msec() + _, content = yield self.transport_layer.send_join( + destination=destination, + room_id=pdu.room_id, + event_id=pdu.event_id, + content=pdu.get_pdu_json(time_now), + ) + + logger.debug("Got content: %s", content) + + state = [ + self.event_from_pdu_json(p, outlier=True) + for p in content.get("state", []) + ] + + auth_chain = [ + self.event_from_pdu_json(p, outlier=True) + for p in content.get("auth_chain", []) + ] + + auth_chain.sort(key=lambda e: e.depth) + + defer.returnValue({ + "state": state, + "auth_chain": auth_chain, + }) + + @defer.inlineCallbacks + def send_invite(self, destination, room_id, event_id, pdu): + time_now = self._clock.time_msec() + code, content = yield self.transport_layer.send_invite( + destination=destination, + room_id=room_id, + event_id=event_id, + content=pdu.get_pdu_json(time_now), + ) + + pdu_dict = content["event"] + + logger.debug("Got response to send_invite: %s", pdu_dict) + + defer.returnValue(self.event_from_pdu_json(pdu_dict)) + + def event_from_pdu_json(self, pdu_json, outlier=False): + event = FrozenEvent( + pdu_json + ) + + event.internal_metadata.outlier = outlier + + return event diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py new file mode 100644 index 0000000000..0597725ce7 --- /dev/null +++ b/synapse/federation/federation_server.py @@ -0,0 +1,345 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+
+from twisted.internet import defer
+
+from .units import Transaction, Edu
+
+from synapse.util.logutils import log_function
+from synapse.util.logcontext import PreserveLoggingContext
+from synapse.events import FrozenEvent
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class FederationServer(object):
+    def set_handler(self, handler):
+        """Sets the handler that the replication layer will use to communicate
+        receipt of new PDUs from other home servers. The required methods are
+        documented on :py:class:`.ReplicationHandler`.
+        """
+        self.handler = handler
+
+    def register_edu_handler(self, edu_type, handler):
+        if edu_type in self.edu_handlers:
+            raise KeyError("Already have an EDU handler for %s" % (edu_type,))
+
+        self.edu_handlers[edu_type] = handler
+
+    def register_query_handler(self, query_type, handler):
+        """Sets the handler callable that will be used to handle an incoming
+        federation Query of the given type.
+
+        Args:
+            query_type (str): Category name of the query, which should match
+                the string used by make_query.
+            handler (callable): Invoked to handle incoming queries of this type
+
+        handler is invoked as:
+            result = handler(args)
+
+        where 'args' is a dict mapping strings to strings of the query
+        arguments. It should return a Deferred that will eventually yield an
+        object to encode as JSON.
+        """
+        if query_type in self.query_handlers:
+            raise KeyError(
+                "Already have a Query handler for %s" % (query_type,)
+            )
+
+        self.query_handlers[query_type] = handler
+
+    @defer.inlineCallbacks
+    @log_function
+    def on_backfill_request(self, origin, room_id, versions, limit):
+        pdus = yield self.handler.on_backfill_request(
+            origin, room_id, versions, limit
+        )
+
+        defer.returnValue((200, self._transaction_from_pdus(pdus).get_dict()))
+
+    @defer.inlineCallbacks
+    @log_function
+    def on_incoming_transaction(self, transaction_data):
+        transaction = Transaction(**transaction_data)
+
+        for p in transaction.pdus:
+            if "unsigned" in p:
+                unsigned = p["unsigned"]
+                if "age" in unsigned:
+                    p["age"] = unsigned["age"]
+            if "age" in p:
+                p["age_ts"] = int(self._clock.time_msec()) - int(p["age"])
+                del p["age"]
+
+        pdu_list = [
+            self.event_from_pdu_json(p) for p in transaction.pdus
+        ]
+
+        logger.debug("[%s] Got transaction", transaction.transaction_id)
+
+        response = yield self.transaction_actions.have_responded(transaction)
+
+        if response:
+            logger.debug("[%s] We've already responded to this request",
+                         transaction.transaction_id)
+            defer.returnValue(response)
+            return
+
+        logger.debug("[%s] Transaction is new", transaction.transaction_id)
+
+        with PreserveLoggingContext():
+            dl = []
+            for pdu in pdu_list:
+                dl.append(self._handle_new_pdu(transaction.origin, pdu))
+
+            if hasattr(transaction, "edus"):
+                for edu in [Edu(**x) for x in transaction.edus]:
+                    self.received_edu(
+                        transaction.origin,
+                        edu.edu_type,
+                        edu.content
+                    )
+
+            results = yield defer.DeferredList(dl)
+
+        ret = []
+        for r in results:
+            if r[0]:
+                ret.append({})
+            else:
+                logger.exception(r[1])
+                ret.append({"error": str(r[1])})
+
+        logger.debug("Returning: %s", str(ret))
+
+        # The per-PDU results form the response body that we record and send.
+        response = {"pdus": ret}
+        yield self.transaction_actions.set_response(
+            transaction,
+            200, response
+        )
+        defer.returnValue((200, response))
+
+    def received_edu(self, origin, edu_type, content):
+        if edu_type in self.edu_handlers:
+            self.edu_handlers[edu_type](origin, content)
+        else:
+            logger.warn("Received EDU of type %s with no handler", edu_type)
+
+    @defer.inlineCallbacks
+    @log_function
+    def on_context_state_request(self, origin, room_id, event_id):
+        if event_id:
+            pdus = yield self.handler.get_state_for_pdu(
+                origin, room_id, event_id,
+            )
+            auth_chain = yield self.store.get_auth_chain(
+                [pdu.event_id for pdu in pdus]
+            )
+        else:
+            raise NotImplementedError("Specify an event")
+
+        defer.returnValue((200, {
+            "pdus": [pdu.get_pdu_json() for pdu in pdus],
+            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
+        }))
+
+    @defer.inlineCallbacks
+    @log_function
+    def on_pdu_request(self, origin, event_id):
+        pdu = yield self._get_persisted_pdu(origin, event_id)
+
+        if pdu:
+            defer.returnValue(
+                (200, self._transaction_from_pdus([pdu]).get_dict())
+            )
+        else:
+            defer.returnValue((404, ""))
+
+    @defer.inlineCallbacks
+    @log_function
+    def on_pull_request(self, origin, versions):
+        raise NotImplementedError("Pull transactions not implemented")
+
+    @defer.inlineCallbacks
+    def on_query_request(self, query_type, args):
+        if query_type in self.query_handlers:
+            response = yield self.query_handlers[query_type](args)
+            defer.returnValue((200, response))
+        else:
+            defer.returnValue(
+                (404, "No handler for Query type '%s'" % (query_type,))
+            )
+
+    @defer.inlineCallbacks
+    def on_make_join_request(self, room_id, user_id):
+        pdu = yield self.handler.on_make_join_request(room_id, user_id)
+        time_now = self._clock.time_msec()
+        defer.returnValue({"event": pdu.get_pdu_json(time_now)})
+
+    @defer.inlineCallbacks
+    def on_invite_request(self, origin, content):
+        pdu = self.event_from_pdu_json(content)
+        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
+        time_now = self._clock.time_msec()
+        defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))
+
+    @defer.inlineCallbacks
+    def on_send_join_request(self, origin, content):
+        logger.debug("on_send_join_request: content: %s", content)
+        pdu = self.event_from_pdu_json(content)
+        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
+        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
+        time_now = self._clock.time_msec()
+        defer.returnValue((200, {
+            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
+            "auth_chain": [
+                p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
+            ],
+        }))
+
+    @defer.inlineCallbacks
+    def on_event_auth(self, origin, room_id, event_id):
+        time_now = self._clock.time_msec()
+        auth_pdus = yield self.handler.on_event_auth(event_id)
+        defer.returnValue((200, {
+            "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
+        }))
+
+    @log_function
+    def _get_persisted_pdu(self, origin, event_id, do_auth=True):
+        """ Get a PDU from the database with given origin and id.
+
+        Returns:
+            Deferred: Results in a `Pdu`.
+        """
+        return self.handler.get_persisted_pdu(
+            origin, event_id, do_auth=do_auth
+        )
+
+    def _transaction_from_pdus(self, pdu_list):
+        """Returns a new Transaction containing the given PDUs suitable for
+        transmission.
+ """ + time_now = self._clock.time_msec() + pdus = [p.get_pdu_json(time_now) for p in pdu_list] + return Transaction( + origin=self.server_name, + pdus=pdus, + origin_server_ts=int(time_now), + destination=None, + ) + + @defer.inlineCallbacks + @log_function + def _handle_new_pdu(self, origin, pdu, max_recursion=10): + # We reprocess pdus when we have seen them only as outliers + existing = yield self._get_persisted_pdu( + origin, pdu.event_id, do_auth=False + ) + + already_seen = ( + existing and ( + not existing.internal_metadata.is_outlier() + or pdu.internal_metadata.is_outlier() + ) + ) + if already_seen: + logger.debug("Already seen pdu %s", pdu.event_id) + defer.returnValue({}) + return + + state = None + + auth_chain = [] + + have_seen = yield self.store.have_events( + [e for e, _ in pdu.prev_events] + ) + + # Get missing pdus if necessary. + if not pdu.internal_metadata.is_outlier(): + # We only backfill backwards to the min depth. + min_depth = yield self.handler.get_min_depth_for_context( + pdu.room_id + ) + + logger.debug( + "_handle_new_pdu min_depth for %s: %d", + pdu.room_id, min_depth + ) + + if min_depth and pdu.depth > min_depth and max_recursion > 0: + for event_id, hashes in pdu.prev_events: + if event_id not in have_seen: + logger.debug( + "_handle_new_pdu requesting pdu %s", + event_id + ) + + try: + new_pdu = yield self.federation_client.get_pdu( + [origin, pdu.origin], + event_id=event_id, + ) + + if new_pdu: + yield self._handle_new_pdu( + origin, + new_pdu, + max_recursion=max_recursion-1 + ) + + logger.debug("Processed pdu %s", event_id) + else: + logger.warn("Failed to get PDU %s", event_id) + except: + # TODO(erikj): Do some more intelligent retries. + logger.exception("Failed to get PDU") + else: + # We need to get the state at this event, since we have reached + # a backward extremity edge. + logger.debug( + "_handle_new_pdu getting state for %s", + pdu.room_id + ) + state, auth_chain = yield self.get_state_for_room( + origin, pdu.room_id, pdu.event_id, + ) + + ret = yield self.handler.on_receive_pdu( + origin, + pdu, + backfilled=False, + state=state, + auth_chain=auth_chain, + ) + + defer.returnValue(ret) + + def __str__(self): + return "" % self.server_name + + def event_from_pdu_json(self, pdu_json, outlier=False): + event = FrozenEvent( + pdu_json + ) + + event.internal_metadata.outlier = outlier + + return event diff --git a/synapse/federation/replication.py b/synapse/federation/replication.py index accf95e406..9ef4834927 100644 --- a/synapse/federation/replication.py +++ b/synapse/federation/replication.py @@ -17,15 +17,12 @@ a given transport. """ -from twisted.internet import defer +from .federation_client import FederationClient +from .federation_server import FederationServer -from .units import Transaction, Edu -from .persistence import TransactionActions from .transaction_queue import TransactionQueue -from synapse.util.logutils import log_function -from synapse.util.logcontext import PreserveLoggingContext -from synapse.events import FrozenEvent +from .persistence import TransactionActions import logging @@ -33,7 +30,7 @@ import logging logger = logging.getLogger(__name__) -class ReplicationLayer(object): +class ReplicationLayer(FederationClient, FederationServer): """This layer is responsible for replicating with remote home servers over the given transport. I.e., does the sending and receiving of PDUs to remote home servers. 
@@ -58,607 +55,20 @@ class ReplicationLayer(object): self.transport_layer.register_received_handler(self) self.transport_layer.register_request_handler(self) - self.store = hs.get_datastore() - # self.pdu_actions = PduActions(self.store) - self.transaction_actions = TransactionActions(self.store) + self.federation_client = self - self._transaction_queue = TransactionQueue( - hs, self.transaction_actions, transport_layer - ) + self.store = hs.get_datastore() self.handler = None self.edu_handlers = {} self.query_handlers = {} - self._order = 0 - self._clock = hs.get_clock() - self.event_builder_factory = hs.get_event_builder_factory() - - def set_handler(self, handler): - """Sets the handler that the replication layer will use to communicate - receipt of new PDUs from other home servers. The required methods are - documented on :py:class:`.ReplicationHandler`. - """ - self.handler = handler - - def register_edu_handler(self, edu_type, handler): - if edu_type in self.edu_handlers: - raise KeyError("Already have an EDU handler for %s" % (edu_type,)) - - self.edu_handlers[edu_type] = handler - - def register_query_handler(self, query_type, handler): - """Sets the handler callable that will be used to handle an incoming - federation Query of the given type. - - Args: - query_type (str): Category name of the query, which should match - the string used by make_query. - handler (callable): Invoked to handle incoming queries of this type - - handler is invoked as: - result = handler(args) - - where 'args' is a dict mapping strings to strings of the query - arguments. It should return a Deferred that will eventually yield an - object to encode as JSON. - """ - if query_type in self.query_handlers: - raise KeyError( - "Already have a Query handler for %s" % (query_type,) - ) - - self.query_handlers[query_type] = handler - - @log_function - def send_pdu(self, pdu, destinations): - """Informs the replication layer about a new PDU generated within the - home server that should be transmitted to others. - - TODO: Figure out when we should actually resolve the deferred. - - Args: - pdu (Pdu): The new Pdu. - - Returns: - Deferred: Completes when we have successfully processed the PDU - and replicated it to any interested remote home servers. - """ - order = self._order - self._order += 1 - - logger.debug("[%s] transaction_layer.enqueue_pdu... ", pdu.event_id) - - # TODO, add errback, etc. - self._transaction_queue.enqueue_pdu(pdu, destinations, order) - - logger.debug( - "[%s] transaction_layer.enqueue_pdu... done", - pdu.event_id - ) - - @log_function - def send_edu(self, destination, edu_type, content): - edu = Edu( - origin=self.server_name, - destination=destination, - edu_type=edu_type, - content=content, - ) - - # TODO, add errback, etc. - self._transaction_queue.enqueue_edu(edu) - return defer.succeed(None) - - @log_function - def send_failure(self, failure, destination): - self._transaction_queue.enqueue_failure(failure, destination) - return defer.succeed(None) - - @log_function - def make_query(self, destination, query_type, args, - retry_on_dns_fail=True): - """Sends a federation Query to a remote homeserver of the given type - and arguments. - - Args: - destination (str): Domain name of the remote homeserver - query_type (str): Category of the query type; should match the - handler name used in register_query_handler(). - args (dict): Mapping of strings to strings containing the details - of the query request. 
- - Returns: - a Deferred which will eventually yield a JSON object from the - response - """ - return self.transport_layer.make_query( - destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail - ) - - @defer.inlineCallbacks - @log_function - def backfill(self, dest, context, limit, extremities): - """Requests some more historic PDUs for the given context from the - given destination server. - - Args: - dest (str): The remote home server to ask. - context (str): The context to backfill. - limit (int): The maximum number of PDUs to return. - extremities (list): List of PDU id and origins of the first pdus - we have seen from the context - - Returns: - Deferred: Results in the received PDUs. - """ - logger.debug("backfill extrem=%s", extremities) - - # If there are no extremeties then we've (probably) reached the start. - if not extremities: - return - - transaction_data = yield self.transport_layer.backfill( - dest, context, extremities, limit) - - logger.debug("backfill transaction_data=%s", repr(transaction_data)) - - transaction = Transaction(**transaction_data) - - pdus = [ - self.event_from_pdu_json(p, outlier=False) - for p in transaction.pdus - ] - for pdu in pdus: - yield self._handle_new_pdu(dest, pdu, backfilled=True) - - defer.returnValue(pdus) - - @defer.inlineCallbacks - @log_function - def get_pdu(self, destination, event_id, outlier=False): - """Requests the PDU with given origin and ID from the remote home - server. - - This will persist the PDU locally upon receipt. - - Args: - destination (str): Which home server to query - pdu_origin (str): The home server that originally sent the pdu. - event_id (str) - outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if - it's from an arbitary point in the context as opposed to part - of the current block of PDUs. Defaults to `False` - - Returns: - Deferred: Results in the requested PDU. - """ - - transaction_data = yield self.transport_layer.get_event( - destination, event_id - ) - - transaction = Transaction(**transaction_data) - - pdu_list = [ - self.event_from_pdu_json(p, outlier=outlier) - for p in transaction.pdus - ] - - pdu = None - if pdu_list: - pdu = pdu_list[0] - yield self._handle_new_pdu(destination, pdu) - - defer.returnValue(pdu) - - @defer.inlineCallbacks - @log_function - def get_state_for_room(self, destination, room_id, event_id): - """Requests all of the `current` state PDUs for a given room from - a remote home server. - - Args: - destination (str): The remote homeserver to query for the state. - room_id (str): The id of the room we're interested in. - event_id (str): The id of the event we want the state at. - - Returns: - Deferred: Results in a list of PDUs. 
- """ - - result = yield self.transport_layer.get_room_state( - destination, room_id, event_id=event_id, - ) - - pdus = [ - self.event_from_pdu_json(p, outlier=True) for p in result["pdus"] - ] - - auth_chain = [ - self.event_from_pdu_json(p, outlier=True) - for p in result.get("auth_chain", []) - ] - - defer.returnValue((pdus, auth_chain)) - - @defer.inlineCallbacks - @log_function - def get_event_auth(self, destination, room_id, event_id): - res = yield self.transport_layer.get_event_auth( - destination, room_id, event_id, - ) - - auth_chain = [ - self.event_from_pdu_json(p, outlier=True) - for p in res["auth_chain"] - ] - - auth_chain.sort(key=lambda e: e.depth) - - defer.returnValue(auth_chain) - - @defer.inlineCallbacks - @log_function - def on_backfill_request(self, origin, room_id, versions, limit): - pdus = yield self.handler.on_backfill_request( - origin, room_id, versions, limit - ) - - defer.returnValue((200, self._transaction_from_pdus(pdus).get_dict())) - - @defer.inlineCallbacks - @log_function - def on_incoming_transaction(self, transaction_data): - transaction = Transaction(**transaction_data) - - for p in transaction.pdus: - if "unsigned" in p: - unsigned = p["unsigned"] - if "age" in unsigned: - p["age"] = unsigned["age"] - if "age" in p: - p["age_ts"] = int(self._clock.time_msec()) - int(p["age"]) - del p["age"] - - pdu_list = [ - self.event_from_pdu_json(p) for p in transaction.pdus - ] - - logger.debug("[%s] Got transaction", transaction.transaction_id) - - response = yield self.transaction_actions.have_responded(transaction) - - if response: - logger.debug("[%s] We've already responed to this request", - transaction.transaction_id) - defer.returnValue(response) - return - - logger.debug("[%s] Transaction is new", transaction.transaction_id) - - with PreserveLoggingContext(): - dl = [] - for pdu in pdu_list: - dl.append(self._handle_new_pdu(transaction.origin, pdu)) - - if hasattr(transaction, "edus"): - for edu in [Edu(**x) for x in transaction.edus]: - self.received_edu( - transaction.origin, - edu.edu_type, - edu.content - ) - - results = yield defer.DeferredList(dl) - - ret = [] - for r in results: - if r[0]: - ret.append({}) - else: - logger.exception(r[1]) - ret.append({"error": str(r[1])}) - - logger.debug("Returning: %s", str(ret)) - - yield self.transaction_actions.set_response( - transaction, - 200, response - ) - defer.returnValue((200, response)) - - def received_edu(self, origin, edu_type, content): - if edu_type in self.edu_handlers: - self.edu_handlers[edu_type](origin, content) - else: - logger.warn("Received EDU of type %s with no handler", edu_type) - - @defer.inlineCallbacks - @log_function - def on_context_state_request(self, origin, room_id, event_id): - if event_id: - pdus = yield self.handler.get_state_for_pdu( - origin, room_id, event_id, - ) - auth_chain = yield self.store.get_auth_chain( - [pdu.event_id for pdu in pdus] - ) - else: - raise NotImplementedError("Specify an event") - - defer.returnValue((200, { - "pdus": [pdu.get_pdu_json() for pdu in pdus], - "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain], - })) - - @defer.inlineCallbacks - @log_function - def on_pdu_request(self, origin, event_id): - pdu = yield self._get_persisted_pdu(origin, event_id) - - if pdu: - defer.returnValue( - (200, self._transaction_from_pdus([pdu]).get_dict()) - ) - else: - defer.returnValue((404, "")) - - @defer.inlineCallbacks - @log_function - def on_pull_request(self, origin, versions): - raise NotImplementedError("Pull transactions not implemented") 
- - @defer.inlineCallbacks - def on_query_request(self, query_type, args): - if query_type in self.query_handlers: - response = yield self.query_handlers[query_type](args) - defer.returnValue((200, response)) - else: - defer.returnValue( - (404, "No handler for Query type '%s'" % (query_type,)) - ) - - @defer.inlineCallbacks - def on_make_join_request(self, room_id, user_id): - pdu = yield self.handler.on_make_join_request(room_id, user_id) - time_now = self._clock.time_msec() - defer.returnValue({"event": pdu.get_pdu_json(time_now)}) - - @defer.inlineCallbacks - def on_invite_request(self, origin, content): - pdu = self.event_from_pdu_json(content) - ret_pdu = yield self.handler.on_invite_request(origin, pdu) - time_now = self._clock.time_msec() - defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)})) - - @defer.inlineCallbacks - def on_send_join_request(self, origin, content): - logger.debug("on_send_join_request: content: %s", content) - pdu = self.event_from_pdu_json(content) - logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures) - res_pdus = yield self.handler.on_send_join_request(origin, pdu) - time_now = self._clock.time_msec() - defer.returnValue((200, { - "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]], - "auth_chain": [ - p.get_pdu_json(time_now) for p in res_pdus["auth_chain"] - ], - })) - - @defer.inlineCallbacks - def on_event_auth(self, origin, room_id, event_id): - time_now = self._clock.time_msec() - auth_pdus = yield self.handler.on_event_auth(event_id) - defer.returnValue((200, { - "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus], - })) - - @defer.inlineCallbacks - def make_join(self, destination, room_id, user_id): - ret = yield self.transport_layer.make_join( - destination, room_id, user_id - ) - - pdu_dict = ret["event"] - - logger.debug("Got response to make_join: %s", pdu_dict) - - defer.returnValue(self.event_from_pdu_json(pdu_dict)) - - @defer.inlineCallbacks - def send_join(self, destination, pdu): - time_now = self._clock.time_msec() - _, content = yield self.transport_layer.send_join( - destination=destination, - room_id=pdu.room_id, - event_id=pdu.event_id, - content=pdu.get_pdu_json(time_now), - ) - - logger.debug("Got content: %s", content) - - state = [ - self.event_from_pdu_json(p, outlier=True) - for p in content.get("state", []) - ] - - auth_chain = [ - self.event_from_pdu_json(p, outlier=True) - for p in content.get("auth_chain", []) - ] - - auth_chain.sort(key=lambda e: e.depth) - - defer.returnValue({ - "state": state, - "auth_chain": auth_chain, - }) - - @defer.inlineCallbacks - def send_invite(self, destination, room_id, event_id, pdu): - time_now = self._clock.time_msec() - code, content = yield self.transport_layer.send_invite( - destination=destination, - room_id=room_id, - event_id=event_id, - content=pdu.get_pdu_json(time_now), - ) - - pdu_dict = content["event"] - - logger.debug("Got response to send_invite: %s", pdu_dict) - - defer.returnValue(self.event_from_pdu_json(pdu_dict)) - - @log_function - def _get_persisted_pdu(self, origin, event_id, do_auth=True): - """ Get a PDU from the database with given origin and id. - - Returns: - Deferred: Results in a `Pdu`. - """ - return self.handler.get_persisted_pdu( - origin, event_id, do_auth=do_auth - ) - - def _transaction_from_pdus(self, pdu_list): - """Returns a new Transaction containing the given PDUs suitable for - transmission. 
- """ - time_now = self._clock.time_msec() - pdus = [p.get_pdu_json(time_now) for p in pdu_list] - return Transaction( - origin=self.server_name, - pdus=pdus, - origin_server_ts=int(time_now), - destination=None, - ) - - @defer.inlineCallbacks - @log_function - def _handle_new_pdu(self, origin, pdu, backfilled=False): - # We reprocess pdus when we have seen them only as outliers - existing = yield self._get_persisted_pdu( - origin, pdu.event_id, do_auth=False - ) - - already_seen = ( - existing and ( - not existing.internal_metadata.is_outlier() - or pdu.internal_metadata.is_outlier() - ) - ) - if already_seen: - logger.debug("Already seen pdu %s", pdu.event_id) - defer.returnValue({}) - return - - state = None - - auth_chain = [] - - # We need to make sure we have all the auth events. - # for e_id, _ in pdu.auth_events: - # exists = yield self._get_persisted_pdu( - # origin, - # e_id, - # do_auth=False - # ) - # - # if not exists: - # try: - # logger.debug( - # "_handle_new_pdu fetch missing auth event %s from %s", - # e_id, - # origin, - # ) - # - # yield self.get_pdu( - # origin, - # event_id=e_id, - # outlier=True, - # ) - # - # logger.debug("Processed pdu %s", e_id) - # except: - # logger.warn( - # "Failed to get auth event %s from %s", - # e_id, - # origin - # ) - - # Get missing pdus if necessary. - if not pdu.internal_metadata.is_outlier(): - # We only backfill backwards to the min depth. - min_depth = yield self.handler.get_min_depth_for_context( - pdu.room_id - ) - - logger.debug( - "_handle_new_pdu min_depth for %s: %d", - pdu.room_id, min_depth - ) - - if min_depth and pdu.depth > min_depth: - for event_id, hashes in pdu.prev_events: - exists = yield self._get_persisted_pdu( - origin, - event_id, - do_auth=False - ) - - if not exists: - logger.debug( - "_handle_new_pdu requesting pdu %s", - event_id - ) - - try: - yield self.get_pdu( - origin, - event_id=event_id, - ) - logger.debug("Processed pdu %s", event_id) - except: - # TODO(erikj): Do some more intelligent retries. - logger.exception("Failed to get PDU") - else: - # We need to get the state at this event, since we have reached - # a backward extremity edge. - logger.debug( - "_handle_new_pdu getting state for %s", - pdu.room_id - ) - state, auth_chain = yield self.get_state_for_room( - origin, pdu.room_id, pdu.event_id, - ) - - if not backfilled: - ret = yield self.handler.on_receive_pdu( - origin, - pdu, - backfilled=backfilled, - state=state, - auth_chain=auth_chain, - ) - else: - ret = None - - # yield self.pdu_actions.mark_as_processed(pdu) + self.transaction_actions = TransactionActions(self.store) + self._transaction_queue = TransactionQueue(hs, transport_layer) - defer.returnValue(ret) + self._order = 0 def __str__(self): return "" % self.server_name - - def event_from_pdu_json(self, pdu_json, outlier=False): - event = FrozenEvent( - pdu_json - ) - - event.internal_metadata.outlier = outlier - - return event diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index c2cb4a1c49..9d4f2c09a2 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -16,6 +16,7 @@ from twisted.internet import defer +from .persistence import TransactionActions from .units import Transaction from synapse.util.logutils import log_function @@ -34,13 +35,15 @@ class TransactionQueue(object): It batches pending PDUs into single transactions. 
""" - def __init__(self, hs, transaction_actions, transport_layer): + def __init__(self, hs, transport_layer): self.server_name = hs.hostname - self.transaction_actions = transaction_actions + + self.store = hs.get_datastore() + self.transaction_actions = TransactionActions(self.store) + self.transport_layer = transport_layer self._clock = hs.get_clock() - self.store = hs.get_datastore() # Is a mapping from destinations -> deferreds. Used to keep track # of which destinations have transactions in flight and when they are diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 4f09909607..27d835db79 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -434,7 +434,7 @@ class DataStore(RoomMemberStore, RoomStore, sql = ( "SELECT e.event_id, reason FROM events as e " "LEFT JOIN rejections as r ON e.event_id = r.event_id " - "WHERE event_id = ?" + "WHERE e.event_id = ?" ) res = {} -- cgit 1.5.1 From 69a75b7ebebb393c1ce84ff949f3480a6af0a782 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 26 Jan 2015 16:52:47 +0000 Subject: Add brackets to make get room name / alias work --- synapse/storage/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 8f56d90d95..2534d109fd 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -402,8 +402,8 @@ class DataStore(RoomMemberStore, RoomStore, "redacted": del_sql, } - sql += " AND (s.type = 'm.room.name' AND s.state_key = '')" - sql += " OR s.type = 'm.room.aliases'" + sql += " AND ((s.type = 'm.room.name' AND s.state_key = '')" + sql += " OR s.type = 'm.room.aliases')" args = (room_id,) results = yield self._execute_and_decode(sql, *args) -- cgit 1.5.1 From 51449e06654c4af7a645124dc64e1f0cc1678b24 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Tue, 27 Jan 2015 15:50:28 +0000 Subject: Add appservice handler and store. Glue together rest > handler > store. 
--- synapse/handlers/__init__.py | 2 ++ synapse/handlers/appservice.py | 49 ++++++++++++++++++++++++++++++++++ synapse/rest/appservice/v1/base.py | 3 ++- synapse/rest/appservice/v1/register.py | 8 ++++-- synapse/storage/__init__.py | 5 ++-- synapse/storage/appservice.py | 45 +++++++++++++++++++++++++++++++ 6 files changed, 107 insertions(+), 5 deletions(-) create mode 100644 synapse/handlers/appservice.py create mode 100644 synapse/storage/appservice.py (limited to 'synapse/storage') diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py index fe071a4bc2..96a9b143ca 100644 --- a/synapse/handlers/__init__.py +++ b/synapse/handlers/__init__.py @@ -26,6 +26,7 @@ from .presence import PresenceHandler from .directory import DirectoryHandler from .typing import TypingNotificationHandler from .admin import AdminHandler +from .appservice import ApplicationServicesHandler class Handlers(object): @@ -51,3 +52,4 @@ class Handlers(object): self.directory_handler = DirectoryHandler(hs) self.typing_notification_handler = TypingNotificationHandler(hs) self.admin_handler = AdminHandler(hs) + self.appservice_handler = ApplicationServicesHandler(hs) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py new file mode 100644 index 0000000000..55a653476f --- /dev/null +++ b/synapse/handlers/appservice.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer + +from ._base import BaseHandler +from synapse.api.errors import StoreError, SynapseError + +import logging + + +logger = logging.getLogger(__name__) + + +class ApplicationServicesHandler(BaseHandler): + + def __init__(self, hs): + super(ApplicationServicesHandler, self).__init__(hs) + + @defer.inlineCallbacks + def register(self, base_url, token, namespaces): + # check the token is recognised + try: + app_service = yield self.store.get_app_service(token) + if not app_service: + raise StoreError + except StoreError: + raise SynapseError( + 403, "Unrecognised application services token. " + "Consult the home server admin." + ) + + # update AS entry with base URL + + # store namespaces for this AS + + defer.returnValue("not_implemented_yet") diff --git a/synapse/rest/appservice/v1/base.py b/synapse/rest/appservice/v1/base.py index 46c9a444c0..65d5bcf9be 100644 --- a/synapse/rest/appservice/v1/base.py +++ b/synapse/rest/appservice/v1/base.py @@ -44,4 +44,5 @@ class AppServiceRestServlet(RestServlet): """ def __init__(self, hs): - self.hs = hs \ No newline at end of file + self.hs = hs + self.handler = hs.get_handlers().appservice_handler diff --git a/synapse/rest/appservice/v1/register.py b/synapse/rest/appservice/v1/register.py index 779447ac6a..142f09a638 100644 --- a/synapse/rest/appservice/v1/register.py +++ b/synapse/rest/appservice/v1/register.py @@ -14,6 +14,7 @@ # limitations under the License. 
"""This module contains REST servlets to do with registration: /register""" +from twisted.internet import defer from base import AppServiceRestServlet, as_path_pattern from synapse.api.errors import CodeMessageException, SynapseError @@ -30,6 +31,7 @@ class RegisterRestServlet(AppServiceRestServlet): PATTERN = as_path_pattern("/register$") + @defer.inlineCallbacks def on_POST(self, request): params = _parse_json(request) @@ -56,9 +58,11 @@ class RegisterRestServlet(AppServiceRestServlet): self._parse_namespace(namespaces, params["namespaces"], "rooms") self._parse_namespace(namespaces, params["namespaces"], "aliases") - # TODO: pass to the appservice handler + hs_token = yield self.handler.register(as_url, as_token, namespaces) - raise CodeMessageException(500, "Not implemented.") + defer.returnValue({ + "hs_token": hs_token + }) def _parse_namespace(self, target_ns, origin_ns, ns): if ns not in target_ns or ns not in origin_ns: diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 4beb951b9f..9431c1a32d 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.util.logutils import log_function from synapse.api.constants import EventTypes +from .appservice import ApplicationServiceStore from .directory import DirectoryStore from .feedback import FeedbackStore from .presence import PresenceStore @@ -80,8 +81,8 @@ class DataStore(RoomMemberStore, RoomStore, RegistrationStore, StreamStore, ProfileStore, FeedbackStore, PresenceStore, TransactionStore, DirectoryStore, KeyStore, StateStore, SignatureStore, - EventFederationStore, - MediaRepositoryStore, + EventFederationStore, MediaRepositoryStore, + ApplicationServiceStore ): def __init__(self, hs): diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py new file mode 100644 index 0000000000..99f58b4c62 --- /dev/null +++ b/synapse/storage/appservice.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer + +from synapse.api.errors import StoreError + +from ._base import SQLBaseStore + + +class ApplicationServiceStore(SQLBaseStore): + + def __init__(self, hs): + super(ApplicationServiceStore, self).__init__(hs) + + self.clock = hs.get_clock() + + @defer.inlineCallbacks + def get_app_service(self, as_token): + """Get the application service with the given token. + + Args: + token (str): The application service token. + Raises: + StoreError if there was a problem retrieving this. + """ + row = self._simple_select_one( + "application_services", {"token": as_token}, + ["url", "token"] + ) + if not row: + raise StoreError(400, "Bad application services token supplied.") + defer.returnValue(row) -- cgit 1.5.1 From 7331d34839dca468b9e396e1d2952b0bb32011bf Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Tue, 27 Jan 2015 16:23:46 +0000 Subject: Add AS specific classes with docstrings. 
---
 synapse/storage/appservice.py | 60 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 59 insertions(+), 1 deletion(-)

(limited to 'synapse/storage')

diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index 99f58b4c62..4c11191fe8 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -20,11 +20,69 @@ from synapse.api.errors import StoreError
 from ._base import SQLBaseStore
 
 
+# XXX: This feels like it should belong in a "models" module, not storage.
+class ApplicationService(object):
+    """Defines an application service.
+
+    Provides methods to check if this service is "interested" in events.
+    """
+
+    def __init__(self, token, url=None, namespaces=None):
+        self.token = token
+        if url:
+            self.url = url
+        if namespaces:
+            self.namespaces = namespaces
+
+    def is_interested(self, event):
+        """Check if this service is interested in this event.
+
+        Args:
+            event(Event): The event to check.
+        Returns:
+            bool: True if this service would like to know about this event.
+        """
+        # NB: This does not check room alias regex matches because that requires
+        # more context than an Event can provide. Room alias matches are checked
+        # in the ApplicationServiceHandler.
+
+        # TODO check if event.room_id regex matches
+        # TODO check if event.user_id regex matches (or m.room.member state_key)
+
+        return True
+
+
+class ApplicationServiceCache(object):
+    """Caches ApplicationServices and provides utility functions on top.
+
+    This class is designed to be invoked on incoming events in order to avoid
+    hammering the database every time to extract a list of application service
+    regexes.
+    """
+
+    def __init__(self):
+        self.services = []
+
+    def get_services_for_event(self, event):
+        """Retrieve a list of application services interested in this event.
+
+        Args:
+            event(Event): The event to check.
+        Returns:
+            list: A list of services interested in this
+            event based on the service regex.
+ """ + interested_list = [ + s for s in self.services if s.is_event_claimed(event) + ] + return interested_list + + class ApplicationServiceStore(SQLBaseStore): def __init__(self, hs): super(ApplicationServiceStore, self).__init__(hs) - + self.cache = ApplicationServiceCache() self.clock = hs.get_clock() @defer.inlineCallbacks -- cgit 1.5.1 From a56008842b43089433768f569f35b2d14523ac39 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 27 Jan 2015 16:24:22 +0000 Subject: Start implementing incremental initial sync --- synapse/events/utils.py | 1 + synapse/handlers/sync.py | 233 +++++++++++++++++++++++++++++++++++++++++----- synapse/storage/stream.py | 41 ++++++-- 3 files changed, 241 insertions(+), 34 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/events/utils.py b/synapse/events/utils.py index b7f1ad4b40..42fb0371e5 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -137,6 +137,7 @@ def serialize_event(e, time_now_ms, client_event=True, strip_ids=False): d.pop("depth", None) d.pop("unsigned", None) d.pop("origin", None) + d.pop("prev_state", None) if strip_ids: d.pop("room_id", None) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index bbabaf3df1..f8629a588f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -37,14 +37,18 @@ SyncConfig = collections.namedtuple("SyncConfig", [ ]) -RoomSyncResult = collections.namedtuple("RoomSyncResult", [ +class RoomSyncResult(collections.namedtuple("RoomSyncResult", [ "room_id", "limited", "published", - "events", # dict of event + "events", "state", "prev_batch", -]) +])): + __slots__ = [] + + def __nonzero__(self): + return bool(self.events or self.state) class SyncResult(collections.namedtuple("SyncResult", [ @@ -56,7 +60,9 @@ class SyncResult(collections.namedtuple("SyncResult", [ __slots__ = [] def __nonzero__(self): - return self.private_user_data or self.public_user_data or self.rooms + return bool( + self.private_user_data or self.public_user_data or self.rooms + ) class SyncHandler(BaseHandler): @@ -67,7 +73,13 @@ class SyncHandler(BaseHandler): self.clock = hs.get_clock() def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0): - if timeout == 0: + """Get the sync for a client if we have new data for it now. Otherwise + wait for new data to arrive on the server. If the timeout expires, then + return an empty sync result. + Returns: + A Deferred SyncResult. + """ + if timeout == 0 or since_token is None: return self.current_sync_for_user(sync_config, since_token) else: def current_sync_callback(since_token): @@ -79,13 +91,25 @@ class SyncHandler(BaseHandler): ) def current_sync_for_user(self, sync_config, since_token=None): + """Get the sync for client needed to match what the server has now. + Returns: + A Deferred SyncResult. + """ if since_token is None: return self.initial_sync(sync_config) else: - return self.incremental_sync(sync_config) + if sync_config.gap: + return self.incremental_sync_with_gap(sync_config, since_token) + else: + #TODO(mjark): Handle gapless sync + pass @defer.inlineCallbacks def initial_sync(self, sync_config): + """Get a sync for a client which is starting without any state + Returns: + A Deferred SyncResult. + """ if sync_config.sort == "timeline,desc": # TODO(mjark): Handle going through events in reverse order?. 
# What does "most recent events" mean when applying the limits mean @@ -114,25 +138,86 @@ class SyncHandler(BaseHandler): rooms = [] for event in room_list: - #TODO (mjark): Apply the event filter in sync_config. - recent_events, token = yield self.store.get_recent_events_for_room( - event.room_id, - limit=sync_config.limit, - end_token=now_token.room_key, - ) - prev_batch_token = now_token.copy_and_replace("room_key", token[0]) - current_state_events = yield self.state_handler.get_current_state( - event.room_id + room_sync = yield self.initial_sync_for_room( + event.room_id, sync_config, now_token, published_room_ids ) + rooms.append(room_sync) + + defer.returnValue(SyncResult( + public_user_data=presence, + private_user_data=[], + rooms=rooms, + next_batch=now_token, + )) + + @defer.inlineCallbacks + def intial_sync_for_room(self, room_id, sync_config, now_token, + published_room_ids): + """Sync a room for a client which is starting without any state + Returns: + A Deferred RoomSyncResult. + """ + recent_events, token = yield self.store.get_recent_events_for_room( + room_id, + limit=sync_config.limit, + end_token=now_token.room_key, + ) + prev_batch_token = now_token.copy_and_replace("room_key", token[0]) + current_state_events = yield self.state_handler.get_current_state( + room_id + ) + + defer.returnValue(RoomSyncResult( + room_id=room_id, + published=room_id in published_room_ids, + events=recent_events, + prev_batch=prev_batch_token, + state=current_state_events, + limited=True, + )) + + + @defer.inlineCallbacks + def incremental_sync_with_gap(self, sync_config, since_token): + """ Get the incremental delta needed to bring the client up to + date with the server. + Returns: + A Deferred SyncResult. + """ + if sync_config.sort == "timeline,desc": + # TODO(mjark): Handle going through events in reverse order?. + # What does "most recent events" mean when applying the limits mean + # in this case? + raise NotImplementedError() + + now_token = yield self.event_sources.get_current_token() + + presence_stream = self.event_sources.sources["presence"] + pagination_config = PaginationConfig( + from_token=since_token, to_token=now_token + ) + presence, _ = yield presence_stream.get_pagination_rows( + user=sync_config.user, + pagination_config=pagination_config.get_source_config("presence"), + key=None + ) + room_list = yield self.store.get_rooms_for_user_where_membership_is( + user_id=sync_config.user.to_string(), + membership_list=[Membership.INVITE, Membership.JOIN] + ) + + # TODO (mjark): Does public mean "published"? + published_rooms = yield self.store.get_rooms(is_public=True) + published_room_ids = set(r["room_id"] for r in published_rooms) - rooms.append(RoomSyncResult( - room_id=event.room_id, - published=event.room_id in published_room_ids, - events=recent_events, - prev_batch=prev_batch_token, - state=current_state_events, - limited=True, - )) + rooms = [] + for event in room_list: + room_sync = yield self.incremental_sync_with_gap_for_room( + event.room_id, sync_config, since_token, now_token, + published_room_ids + ) + if room_sync: + rooms.append(room_sync) defer.returnValue(SyncResult( public_user_data=presence, @@ -143,5 +228,103 @@ class SyncHandler(BaseHandler): @defer.inlineCallbacks - def incremental_sync(self, sync_config): - pass + def incremental_sync_with_gap_for_room(self, room_id, sync_config, + since_token, now_token, + published_room_ids): + """ Get the incremental delta needed to bring the client up to date for + the room. 
Gives the client the most recent events and the changes to + state. + Returns: + A Deferred RoomSyncResult + """ + # TODO(mjark): Check if they have joined the room between + # the previous sync and this one. + # TODO(mjark): Apply the event filter in sync_config + # TODO(mjark): Check for redactions we might have missed. + # TODO(mjark): Typing notifications. + recents, token = yield self.store.get_recent_events_for_room( + room_id, + limit=sync_config.limit + 1, + from_token=since_token.room_key, + end_token=now_token.room_key, + ) + + logging.debug("Recents %r", recents) + + if len(recents) > sync_config.limit: + limited = True + recents = recents[1:] + else: + limited = False + + prev_batch_token = now_token.copy_and_replace("room_key", token[0]) + + # TODO(mjark): This seems racy since this isn't being passed a + # token to indicate what point in the stream this is + current_state_events = yield self.state_handler.get_current_state( + room_id + ) + + state_at_previous_sync = yield self.get_state_at_previous_sync( + room_id, since_token=since_token + ) + + state_events_delta = yield self.compute_state_delta( + since_token=since_token, + previous_state=state_at_previous_sync, + current_state=current_state_events, + ) + + room_sync = RoomSyncResult( + room_id=room_id, + published=room_id in published_room_ids, + events=recents, + prev_batch=prev_batch_token, + state=state_events_delta, + limited=limited, + ) + + logging.debug("Room sync: %r", room_sync) + + defer.returnValue(room_sync) + + @defer.inlineCallbacks + def get_state_at_previous_sync(self, room_id, since_token): + """ Get the room state at the previous sync the client made. + Returns: + A Deferred list of Events. + """ + last_events, token = yield self.store.get_recent_events_for_room( + room_id, end_token=since_token.room_key, limit=1, + ) + + if last_events: + last_event = last_events[0] + last_context = yield self.state_handler.compute_event_context( + last_event + ) + if last_event.is_state(): + state = [last_event] + last_context.current_state.values() + else: + state = last_context.current_state.values() + else: + state = () + defer.returnValue(state) + + + def compute_state_delta(self, since_token, previous_state, current_state): + """ Works out the differnce in state between the current state and the + state the client got when it last performed a sync. + Returns: + A list of events. + """ + # TODO(mjark) Check if the state events were received by the server + # after the previous sync, since we need to include those state + # updates even if they occured logically before the previous event. + # TODO(mjark) Check for new redactions in the state events. + previous_dict = {event.event_id:event for event in previous_state} + state_delta = [] + for event in current_state: + if event.event_id not in previous_dict: + state_delta.append(event) + return state_delta diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 8ac2adab05..06aca1a4e5 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -265,17 +265,38 @@ class StreamStore(SQLBaseStore): return self.runInteraction("paginate_room_events", f) def get_recent_events_for_room(self, room_id, limit, end_token, - with_feedback=False): + with_feedback=False, from_token=None): # TODO (erikj): Handle compressed feedback - sql = ( - "SELECT stream_ordering, topological_ordering, event_id FROM events " - "WHERE room_id = ? AND stream_ordering <= ? AND outlier = 0 " - "ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ? 
" - ) + end_token = _StreamToken.parse_stream_token(end_token) - def f(txn): - txn.execute(sql, (room_id, end_token, limit,)) + if from_token is None: + sql = ( + "SELECT stream_ordering, topological_ordering, event_id" + " FROM events" + " WHERE room_id = ? AND stream_ordering <= ? AND outlier = 0" + " ORDER BY topological_ordering DESC, stream_ordering DESC" + " LIMIT ?" + ) + else: + from_token = _StreamToken.parse_stream_token(from_token) + sql = ( + "SELECT stream_ordering, topological_ordering, event_id" + " FROM events" + " WHERE room_id = ? AND stream_ordering > ?" + " AND stream_ordering <= ? AND outlier = 0" + " ORDER BY topological_ordering DESC, stream_ordering DESC" + " LIMIT ?" + ) + + + def get_recent_events_for_room_txn(txn): + if from_token is None: + txn.execute(sql, (room_id, end_token.stream, limit,)) + else: + txn.execute(sql, ( + room_id, from_token.stream, end_token.stream, limit + )) rows = self.cursor_to_dict(txn) @@ -303,7 +324,9 @@ class StreamStore(SQLBaseStore): return events, token - return self.runInteraction("get_recent_events_for_room", f) + return self.runInteraction( + "get_recent_events_for_room", get_recent_events_for_room_txn + ) def get_room_events_max_id(self): return self.runInteraction( -- cgit 1.5.1 From 92171f9dd1ecac24aeae2f46729f3cbbbe94f91e Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Tue, 27 Jan 2015 16:53:59 +0000 Subject: Add stub methods, TODOs and docstrings for application services. --- synapse/handlers/appservice.py | 25 +++++++++++++++++--- synapse/storage/appservice.py | 52 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 72 insertions(+), 5 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 55a653476f..25e1cece56 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -42,8 +42,27 @@ class ApplicationServicesHandler(BaseHandler): "Consult the home server admin." ) - # update AS entry with base URL - - # store namespaces for this AS + # store this AS defer.returnValue("not_implemented_yet") + + def unregister(self, token): + yield self.store.unregister_app_service(token) + + def notify_interested_services(self, event): + """Notifies (pushes) all application services interested in this event. + + Pushing is done asynchronously, so this method won't block for any + prolonged length of time. + + Args: + event(Event): The event to push out to interested services. + """ + # TODO: Gather interested services + # get_services_for_event(event) <-- room IDs and user IDs + # Get a list of room aliases. Check regex. + # TODO: If unknown user: poke User Query API. + # TODO: If unknown room alias: poke Room Alias Query API. + + # TODO: Fork off pushes to these services - XXX First cut, best effort + pass diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 4c11191fe8..fbad17cb9e 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -84,16 +84,60 @@ class ApplicationServiceStore(SQLBaseStore): super(ApplicationServiceStore, self).__init__(hs) self.cache = ApplicationServiceCache() self.clock = hs.get_clock() + self._populate_cache() + + def unregister_app_service(self, token): + """Unregisters this service. + + This removes all AS specific regex and the base URL. The token is the + only thing preserved for future registration attempts. 
+ """ + # TODO: DELETE FROM application_services_regex WHERE id=this service + # TODO: SET url=NULL WHERE token=token + # TODO: Update cache + pass + + def update_app_service(self, service): + """Update an application service, clobbering what was previously there. + + Args: + service(ApplicationService): The updated service. + """ + # NB: There is no "insert" since we provide no public-facing API to + # allocate new ASes. It relies on the server admin inserting the AS + # token into the database manually. + + # TODO: UPDATE application_services, SET url WHERE token=service.token + # TODO: DELETE FROM application_services_regex WHERE id=this service + # TODO: INSERT INTO application_services_regex + # TODO: Update cache + pass + + def get_services_for_event(self, event): + return self.cache.get_services_for_event(event) @defer.inlineCallbacks - def get_app_service(self, as_token): + def get_app_service(self, as_token, from_cache=True): """Get the application service with the given token. Args: token (str): The application service token. + from_cache (bool): True to get this service from the cache, False to + check the database. Raises: - StoreError if there was a problem retrieving this. + StoreError if there was a problem retrieving this service. """ + + if from_cache: + for service in self.cache.services: + if service.token == as_token: + defer.returnValue(service) + return + defer.returnValue(None) + return + + + # TODO: This should be JOINed with the application_services_regex table. row = self._simple_select_one( "application_services", {"token": as_token}, ["url", "token"] @@ -101,3 +145,7 @@ class ApplicationServiceStore(SQLBaseStore): if not row: raise StoreError(400, "Bad application services token supplied.") defer.returnValue(row) + + def _populate_cache(self): + """Populates the ApplicationServiceCache from the database.""" + pass -- cgit 1.5.1 From ec3719b583c6fbbc56dbd313b858054e535ae733 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Tue, 27 Jan 2015 17:15:06 +0000 Subject: Use ApplicationService when registering. --- synapse/handlers/appservice.py | 13 +++++-------- synapse/rest/appservice/v1/register.py | 8 ++++++-- synapse/storage/appservice.py | 16 ++++++---------- 3 files changed, 17 insertions(+), 20 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 25e1cece56..1890ca06aa 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -30,21 +30,18 @@ class ApplicationServicesHandler(BaseHandler): super(ApplicationServicesHandler, self).__init__(hs) @defer.inlineCallbacks - def register(self, base_url, token, namespaces): + def register(self, app_service): # check the token is recognised try: - app_service = yield self.store.get_app_service(token) - if not app_service: - raise StoreError + stored_service = yield self.store.get_app_service(app_service.token) + if not stored_service: + raise StoreError(404, "Not found") except StoreError: raise SynapseError( 403, "Unrecognised application services token. " "Consult the home server admin." 
) - - # store this AS - - defer.returnValue("not_implemented_yet") + # TODO store this AS def unregister(self, token): yield self.store.unregister_app_service(token) diff --git a/synapse/rest/appservice/v1/register.py b/synapse/rest/appservice/v1/register.py index 142f09a638..5786cf873e 100644 --- a/synapse/rest/appservice/v1/register.py +++ b/synapse/rest/appservice/v1/register.py @@ -18,6 +18,7 @@ from twisted.internet import defer from base import AppServiceRestServlet, as_path_pattern from synapse.api.errors import CodeMessageException, SynapseError +from synapse.storage.appservice import ApplicationService import json import logging @@ -58,7 +59,10 @@ class RegisterRestServlet(AppServiceRestServlet): self._parse_namespace(namespaces, params["namespaces"], "rooms") self._parse_namespace(namespaces, params["namespaces"], "aliases") - hs_token = yield self.handler.register(as_url, as_token, namespaces) + app_service = ApplicationService(as_token, as_url, namespaces) + + yield self.handler.register(app_service) + hs_token = "_not_implemented_yet" # TODO: Pull this from self.hs? defer.returnValue({ "hs_token": hs_token @@ -97,7 +101,7 @@ class UnregisterRestServlet(AppServiceRestServlet): except (KeyError, ValueError): raise SynapseError(400, "Missing required key: as_token(str)") - # TODO: pass to the appservice handler + yield self.handler.unregister(as_token) raise CodeMessageException(500, "Not implemented") diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index fbad17cb9e..f84f026b7b 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -116,8 +116,7 @@ class ApplicationServiceStore(SQLBaseStore): def get_services_for_event(self, event): return self.cache.get_services_for_event(event) - @defer.inlineCallbacks - def get_app_service(self, as_token, from_cache=True): + def get_app_service(self, token, from_cache=True): """Get the application service with the given token. Args: @@ -130,21 +129,18 @@ class ApplicationServiceStore(SQLBaseStore): if from_cache: for service in self.cache.services: - if service.token == as_token: - defer.returnValue(service) - return - defer.returnValue(None) - return - + if service.token == token: + return service + return None # TODO: This should be JOINed with the application_services_regex table. row = self._simple_select_one( - "application_services", {"token": as_token}, + "application_services", {"token": token}, ["url", "token"] ) if not row: raise StoreError(400, "Bad application services token supplied.") - defer.returnValue(row) + return row def _populate_cache(self): """Populates the ApplicationServiceCache from the database.""" -- cgit 1.5.1 From fbeaeb868960099c3682802275d5a222c0cc2d8b Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Tue, 27 Jan 2015 17:34:40 +0000 Subject: Log when ASes are registered/unregistered. 
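Besides the two logger.info calls, this commit gives ApplicationService a __str__ that simply dumps self.__dict__, which is adequate because the object only ever carries plain token/url/namespaces attributes. Roughly what a registration then logs (illustrative values, and dict ordering may vary):

    service = ApplicationService("some_token", url="http://localhost:8009")
    logger.info("Register -> %s", service)
    # Register -> ApplicationService: {'token': 'some_token', 'url': 'http://localhost:8009'}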
--- synapse/handlers/appservice.py | 2 ++ synapse/storage/appservice.py | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 1890ca06aa..c9f56c41eb 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -31,6 +31,7 @@ class ApplicationServicesHandler(BaseHandler): @defer.inlineCallbacks def register(self, app_service): + logger.info("Register -> %s", app_service) # check the token is recognised try: stored_service = yield self.store.get_app_service(app_service.token) @@ -44,6 +45,7 @@ class ApplicationServicesHandler(BaseHandler): # TODO store this AS def unregister(self, token): + logger.info("Unregister as_token=%s", token) yield self.store.unregister_app_service(token) def notify_interested_services(self, event): diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index f84f026b7b..cd15843ba3 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -51,6 +51,9 @@ class ApplicationService(object): return True + def __str__(self): + return "ApplicationService: %s" % (self.__dict__,) + class ApplicationServiceCache(object): """Caches ApplicationServices and provides utility functions on top. @@ -83,7 +86,6 @@ class ApplicationServiceStore(SQLBaseStore): def __init__(self, hs): super(ApplicationServiceStore, self).__init__(hs) self.cache = ApplicationServiceCache() - self.clock = hs.get_clock() self._populate_cache() def unregister_app_service(self, token): -- cgit 1.5.1 From 54e513b4e6b5c644b9a2aeb02cef8258e87ae26a Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Tue, 27 Jan 2015 17:48:13 +0000 Subject: Move storage of user filters into real datastore layer; now have to mock it out in the REST-level tests --- synapse/api/filtering.py | 27 +++--------------- synapse/storage/__init__.py | 3 +- synapse/storage/filtering.py | 46 +++++++++++++++++++++++++++++++ tests/rest/client/v2_alpha/__init__.py | 9 ++++-- tests/rest/client/v2_alpha/test_filter.py | 21 ++++++++++++++ 5 files changed, 79 insertions(+), 27 deletions(-) create mode 100644 synapse/storage/filtering.py (limited to 'synapse/storage') diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 014e2e1fc9..20b6951d47 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -16,37 +16,18 @@ from twisted.internet import defer -# TODO(paul) -_filters_for_user = {} - - class Filtering(object): def __init__(self, hs): super(Filtering, self).__init__() - self.hs = hs + self.store = hs.get_datastore() - @defer.inlineCallbacks def get_user_filter(self, user_localpart, filter_id): - filters = _filters_for_user.get(user_localpart, None) - - if not filters or filter_id >= len(filters): - raise KeyError() + return self.store.get_user_filter(user_localpart, filter_id) - # trivial yield to make it a generator so d.iC works - yield - defer.returnValue(filters[filter_id]) - - @defer.inlineCallbacks def add_user_filter(self, user_localpart, definition): - filters = _filters_for_user.setdefault(user_localpart, []) - - filter_id = len(filters) - filters.append(definition) - - # trivial yield, see above - yield - defer.returnValue(filter_id) + # TODO(paul): implement sanity checking of the definition + return self.store.add_user_filter(user_localpart, definition) # TODO(paul): surely we should probably add a delete_user_filter or # replace_user_filter at some point? 
There's no REST API specified for diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 4beb951b9f..efa63031bd 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -30,9 +30,9 @@ from .transactions import TransactionStore from .keys import KeyStore from .event_federation import EventFederationStore from .media_repository import MediaRepositoryStore - from .state import StateStore from .signatures import SignatureStore +from .filtering import FilteringStore from syutil.base64util import decode_base64 from syutil.jsonutil import encode_canonical_json @@ -82,6 +82,7 @@ class DataStore(RoomMemberStore, RoomStore, DirectoryStore, KeyStore, StateStore, SignatureStore, EventFederationStore, MediaRepositoryStore, + FilteringStore, ): def __init__(self, hs): diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py new file mode 100644 index 0000000000..18e0e7c298 --- /dev/null +++ b/synapse/storage/filtering.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer + +from ._base import SQLBaseStore + + +# TODO(paul) +_filters_for_user = {} + + +class FilteringStore(SQLBaseStore): + @defer.inlineCallbacks + def get_user_filter(self, user_localpart, filter_id): + filters = _filters_for_user.get(user_localpart, None) + + if not filters or filter_id >= len(filters): + raise KeyError() + + # trivial yield to make it a generator so d.iC works + yield + defer.returnValue(filters[filter_id]) + + @defer.inlineCallbacks + def add_user_filter(self, user_localpart, definition): + filters = _filters_for_user.setdefault(user_localpart, []) + + filter_id = len(filters) + filters.append(definition) + + # trivial yield, see above + yield + defer.returnValue(filter_id) diff --git a/tests/rest/client/v2_alpha/__init__.py b/tests/rest/client/v2_alpha/__init__.py index f59745e13c..3fe62d5ac6 100644 --- a/tests/rest/client/v2_alpha/__init__.py +++ b/tests/rest/client/v2_alpha/__init__.py @@ -39,9 +39,7 @@ class V2AlphaRestTestCase(unittest.TestCase): hs = HomeServer("test", db_pool=None, - datastore=Mock(spec=[ - "insert_client_ip", - ]), + datastore=self.make_datastore_mock(), http_client=None, resource_for_client=self.mock_resource, resource_for_federation=self.mock_resource, @@ -58,3 +56,8 @@ class V2AlphaRestTestCase(unittest.TestCase): for r in self.TO_REGISTER: r.register_servlets(hs, self.mock_resource) + + def make_datastore_mock(self): + return Mock(spec=[ + "insert_client_ip", + ]) diff --git a/tests/rest/client/v2_alpha/test_filter.py b/tests/rest/client/v2_alpha/test_filter.py index 8629a1aed6..1add727e6b 100644 --- a/tests/rest/client/v2_alpha/test_filter.py +++ b/tests/rest/client/v2_alpha/test_filter.py @@ -15,6 +15,8 @@ from twisted.internet import defer +from mock import Mock + from . 
import V2AlphaRestTestCase from synapse.rest.client.v2_alpha import filter @@ -24,6 +26,25 @@ class FilterTestCase(V2AlphaRestTestCase): USER_ID = "@apple:test" TO_REGISTER = [filter] + def make_datastore_mock(self): + datastore = super(FilterTestCase, self).make_datastore_mock() + + self._user_filters = {} + + def add_user_filter(user_localpart, definition): + filters = self._user_filters.setdefault(user_localpart, []) + filter_id = len(filters) + filters.append(definition) + return defer.succeed(filter_id) + datastore.add_user_filter = add_user_filter + + def get_user_filter(user_localpart, filter_id): + filters = self._user_filters[user_localpart] + return defer.succeed(filters[filter_id]) + datastore.get_user_filter = get_user_filter + + return datastore + @defer.inlineCallbacks def test_filter(self): (code, response) = yield self.mock_resource.trigger("POST", -- cgit 1.5.1 From 06cc1470129d443f71bfc81ba716f63b9505467d Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Tue, 27 Jan 2015 18:46:03 +0000 Subject: Initial stab at real SQL storage implementation of user filter definitions --- synapse/storage/__init__.py | 1 + synapse/storage/filtering.py | 49 +++++++++++++++++++++++++----------- synapse/storage/schema/filtering.sql | 24 ++++++++++++++++++ tests/api/test_filtering.py | 19 +++++++++++++- 4 files changed, 78 insertions(+), 15 deletions(-) create mode 100644 synapse/storage/schema/filtering.sql (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index efa63031bd..7c5631d014 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -61,6 +61,7 @@ SCHEMAS = [ "event_edges", "event_signatures", "media_repository", + "filtering", ] diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index 18e0e7c298..e98eaf8032 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -17,6 +17,8 @@ from twisted.internet import defer from ._base import SQLBaseStore +import json + # TODO(paul) _filters_for_user = {} @@ -25,22 +27,41 @@ _filters_for_user = {} class FilteringStore(SQLBaseStore): @defer.inlineCallbacks def get_user_filter(self, user_localpart, filter_id): - filters = _filters_for_user.get(user_localpart, None) - - if not filters or filter_id >= len(filters): - raise KeyError() + def_json = yield self._simple_select_one_onecol( + table="user_filters", + keyvalues={ + "user_id": user_localpart, + "filter_id": filter_id, + }, + retcol="definition", + allow_none=False, + ) - # trivial yield to make it a generator so d.iC works - yield - defer.returnValue(filters[filter_id]) + defer.returnValue(json.loads(def_json)) - @defer.inlineCallbacks def add_user_filter(self, user_localpart, definition): - filters = _filters_for_user.setdefault(user_localpart, []) + def_json = json.dumps(definition) + + # Need an atomic transaction to SELECT the maximal ID so far then + # INSERT a new one + def _do_txn(txn): + sql = ( + "SELECT MAX(filter_id) FROM user_filters " + "WHERE user_id = ?" 
+ ) + txn.execute(sql, (user_localpart,)) + max_id = txn.fetchone()[0] + if max_id is None: + filter_id = 0 + else: + filter_id = max_id + 1 + + sql = ( + "INSERT INTO user_filters (user_id, filter_id, definition)" + "VALUES(?, ?, ?)" + ) + txn.execute(sql, (user_localpart, filter_id, def_json)) - filter_id = len(filters) - filters.append(definition) + return filter_id - # trivial yield, see above - yield - defer.returnValue(filter_id) + return self.runInteraction("add_user_filter", _do_txn) diff --git a/synapse/storage/schema/filtering.sql b/synapse/storage/schema/filtering.sql new file mode 100644 index 0000000000..795aca4afd --- /dev/null +++ b/synapse/storage/schema/filtering.sql @@ -0,0 +1,24 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +CREATE TABLE IF NOT EXISTS user_filters( + user_id TEXT, + filter_id INTEGER, + definition TEXT, + FOREIGN KEY(user_id) REFERENCES users(id) +); + +CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters( + user_id, filter_id +); diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index fecadd1056..149948374d 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -53,16 +53,33 @@ class FilteringTestCase(unittest.TestCase): self.filtering = hs.get_filtering() + self.datastore = hs.get_datastore() + @defer.inlineCallbacks - def test_filter(self): + def test_add_filter(self): filter_id = yield self.filtering.add_user_filter( user_localpart=user_localpart, definition={"type": ["m.*"]}, ) + self.assertEquals(filter_id, 0) + self.assertEquals({"type": ["m.*"]}, + (yield self.datastore.get_user_filter( + user_localpart=user_localpart, + filter_id=0, + )) + ) + + @defer.inlineCallbacks + def test_get_filter(self): + filter_id = yield self.datastore.add_user_filter( + user_localpart=user_localpart, + definition={"type": ["m.*"]}, + ) filter = yield self.filtering.get_user_filter( user_localpart=user_localpart, filter_id=filter_id, ) + self.assertEquals(filter, {"type": ["m.*"]}) -- cgit 1.5.1 From 8398f19bcea8fb0134b37efa303dc65b017d75ce Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Tue, 27 Jan 2015 19:00:09 +0000 Subject: Created schema delta --- synapse/storage/__init__.py | 2 +- synapse/storage/schema/delta/v12.sql | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 synapse/storage/schema/delta/v12.sql (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 7c5631d014..00a04f565d 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -67,7 +67,7 @@ SCHEMAS = [ # Remember to update this number every time an incompatible change is made to # database schema files, so the users will be informed on server restarts. 
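
The comment above is the contract this hunk honours: full schema files (like filtering.sql) serve fresh databases, a matching delta/vN.sql serves upgrades, and SCHEMA_VERSION is bumped so running servers notice. A hedged sketch of the kind of check that constant drives at startup (check_schema and db_version are illustrative, not Synapse's actual upgrade code):

    SCHEMA_VERSION = 12

    def check_schema(db_version):
        if db_version == SCHEMA_VERSION:
            return "up to date"
        if db_version < SCHEMA_VERSION:
            # apply each missing delta in order
            return ["schema/delta/v%d.sql" % v
                    for v in range(db_version + 1, SCHEMA_VERSION + 1)]
        raise RuntimeError("database schema is newer than this server")

    print(check_schema(11))  # ['schema/delta/v12.sql']
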
-SCHEMA_VERSION = 11 +SCHEMA_VERSION = 12 class _RollbackButIsFineException(Exception): diff --git a/synapse/storage/schema/delta/v12.sql b/synapse/storage/schema/delta/v12.sql new file mode 100644 index 0000000000..795aca4afd --- /dev/null +++ b/synapse/storage/schema/delta/v12.sql @@ -0,0 +1,24 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +CREATE TABLE IF NOT EXISTS user_filters( + user_id TEXT, + filter_id INTEGER, + definition TEXT, + FOREIGN KEY(user_id) REFERENCES users(id) +); + +CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters( + user_id, filter_id +); -- cgit 1.5.1 From e020574d65a994858ac53c45070ae5016090d2f3 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 27 Jan 2015 20:19:36 +0000 Subject: Fix Formatting --- synapse/handlers/sync.py | 13 +++++-------- synapse/notifier.py | 4 ++-- synapse/rest/client/v2_alpha/sync.py | 5 ++--- synapse/storage/stream.py | 1 - 4 files changed, 9 insertions(+), 14 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9f5f73eab6..82a2c6986a 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -52,10 +52,10 @@ class RoomSyncResult(collections.namedtuple("RoomSyncResult", [ class SyncResult(collections.namedtuple("SyncResult", [ - "next_batch", # Token for the next sync - "private_user_data", # List of private events for the user. - "public_user_data", # List of public events for all users. - "rooms", # RoomSyncResult for each room. + "next_batch", # Token for the next sync + "private_user_data", # List of private events for the user. + "public_user_data", # List of public events for all users. + "rooms", # RoomSyncResult for each room. ])): __slots__ = [] @@ -181,7 +181,6 @@ class SyncHandler(BaseHandler): limited=True, )) - @defer.inlineCallbacks def incremental_sync_with_gap(self, sync_config, since_token): """ Get the incremental delta needed to bring the client up to @@ -231,7 +230,6 @@ class SyncHandler(BaseHandler): next_batch=now_token, )) - @defer.inlineCallbacks def incremental_sync_with_gap_for_room(self, room_id, sync_config, since_token, now_token, @@ -316,7 +314,6 @@ class SyncHandler(BaseHandler): state = () defer.returnValue(state) - def compute_state_delta(self, since_token, previous_state, current_state): """ Works out the differnce in state between the current state and the state the client got when it last performed a sync. @@ -327,7 +324,7 @@ class SyncHandler(BaseHandler): # after the previous sync, since we need to include those state # updates even if they occured logically before the previous event. # TODO(mjark) Check for new redactions in the state events. 
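
The compute_state_delta lines this hunk reflows implement a simple idea: index the previously-synced state by event_id, then keep only the current-state events the client has not seen. A toy standalone version, using plain dicts instead of event objects:

    def compute_state_delta(previous_state, current_state):
        previous_ids = set(e["event_id"] for e in previous_state)
        return [e for e in current_state if e["event_id"] not in previous_ids]

    prev = [{"event_id": "$1"}, {"event_id": "$2"}]
    curr = [{"event_id": "$2"}, {"event_id": "$3"}]
    print(compute_state_delta(prev, curr))  # [{'event_id': '$3'}]

As the TODO above notes, this does not yet account for redactions that arrive after the previous sync.
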
- previous_dict = {event.event_id:event for event in previous_state} + previous_dict = {event.event_id: event for event in previous_state} state_delta = [] for event in current_state: if event.event_id not in previous_dict: diff --git a/synapse/notifier.py b/synapse/notifier.py index 922bf064d0..e3b6ead620 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -214,7 +214,7 @@ class Notifier(object): deferred = defer.Deferred() - from_token=StreamToken("s0","0","0") + from_token = StreamToken("s0", "0", "0") listener = [_NotificationListener( user=user, @@ -231,6 +231,7 @@ class Notifier(object): result = yield callback() if timeout: timed_out = [False] + def _timeout_listener(): timed_out[0] = True listener[0].notify(self, [], from_token, from_token) @@ -252,7 +253,6 @@ class Notifier(object): defer.returnValue(result) - def get_events_for(self, user, rooms, pagination_config, timeout): """ For the given user and rooms, return any new events for them. If there are no new events wait for up to `timeout` milliseconds for any diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index cc667ebafc..0c17208cd3 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -68,7 +68,6 @@ class SyncRestServlet(RestServlet): } """ - PATTERN = client_v2_pattern("/sync$") ALLOWED_SORT = set(["timeline,asc", "timeline,desc"]) ALLOWED_PRESENCE = set(["online", "offline", "idle"]) @@ -114,12 +113,12 @@ class SyncRestServlet(RestServlet): sync_config = SyncConfig( user=user, - device="TODO", # TODO(mjark) Get the device_id from access_token + device="TODO", # TODO(mjark) Get the device_id from access_token gap=gap, limit=limit, sort=sort, backfill=backfill, - filter="TODO", # TODO(mjark) Add the filter to the config. + filter="TODO", # TODO(mjark) Add the filter to the config. ) if since is not None: diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 06aca1a4e5..db1816ea84 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -289,7 +289,6 @@ class StreamStore(SQLBaseStore): " LIMIT ?" ) - def get_recent_events_for_room_txn(txn): if from_token is None: txn.execute(sql, (room_id, end_token.stream, limit,)) -- cgit 1.5.1 From b46fa8603e8e0726ea12310a77ade5cea59c3ae2 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Wed, 28 Jan 2015 09:17:48 +0000 Subject: Remove unused import --- synapse/storage/appservice.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index cd15843ba3..533fac4972 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer - from synapse.api.errors import StoreError from ._base import SQLBaseStore -- cgit 1.5.1 From 42876969b99b6bad146b44a734e8d4a1a14d6835 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Wed, 28 Jan 2015 11:59:38 +0000 Subject: Add basic application_services SQL, and hook up parts of the appservice store to read from it. 
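
The diff below validates appservice namespaces of the shape {"users": [regex, ...], "aliases": [...], "rooms": [...]}. A standalone sketch of that shape check; note the committed code passes logging-style arguments to ValueError, while this sketch formats the message directly:

    def check_namespaces(namespaces):
        for ns in ["users", "rooms", "aliases"]:
            if type(namespaces.get(ns)) != list:
                raise ValueError("Bad namespace value for '%s'" % ns)
            for regex in namespaces[ns]:
                if not isinstance(regex, str):  # basestring in the Python 2 code
                    raise ValueError("Expected string regex for ns '%s'" % ns)

    check_namespaces({"users": ["@irc_.*"], "rooms": [], "aliases": []})  # ok
    try:
        check_namespaces({"users": "@irc_.*", "rooms": [], "aliases": []})
    except ValueError as e:
        print(e)  # Bad namespace value for 'users'
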
--- synapse/handlers/appservice.py | 7 +- synapse/rest/appservice/v1/register.py | 4 +- synapse/storage/__init__.py | 1 + synapse/storage/appservice.py | 89 ++++++++++++++++++++++--- synapse/storage/schema/application_services.sql | 32 +++++++++ 5 files changed, 117 insertions(+), 16 deletions(-) create mode 100644 synapse/storage/schema/application_services.sql (limited to 'synapse/storage') diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index c9f56c41eb..8bd475cbfd 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -16,7 +16,7 @@ from twisted.internet import defer from ._base import BaseHandler -from synapse.api.errors import StoreError, SynapseError +from synapse.api.errors import Codes, StoreError, SynapseError import logging @@ -36,11 +36,12 @@ class ApplicationServicesHandler(BaseHandler): try: stored_service = yield self.store.get_app_service(app_service.token) if not stored_service: - raise StoreError(404, "Not found") + raise StoreError(404, "Application Service Not found") except StoreError: raise SynapseError( 403, "Unrecognised application services token. " - "Consult the home server admin." + "Consult the home server admin.", + errcode=Codes.FORBIDDEN ) # TODO store this AS diff --git a/synapse/rest/appservice/v1/register.py b/synapse/rest/appservice/v1/register.py index 5786cf873e..e374d538e7 100644 --- a/synapse/rest/appservice/v1/register.py +++ b/synapse/rest/appservice/v1/register.py @@ -64,9 +64,9 @@ class RegisterRestServlet(AppServiceRestServlet): yield self.handler.register(app_service) hs_token = "_not_implemented_yet" # TODO: Pull this from self.hs? - defer.returnValue({ + defer.returnValue((200, { "hs_token": hs_token - }) + })) def _parse_namespace(self, target_ns, origin_ns, ns): if ns not in target_ns or ns not in origin_ns: diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 9431c1a32d..e86b981b47 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -62,6 +62,7 @@ SCHEMAS = [ "event_edges", "event_signatures", "media_repository", + "application_services" ] diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 533fac4972..5a0e47e0d4 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -12,12 +12,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -from synapse.api.errors import StoreError +import logging +from twisted.internet import defer from ._base import SQLBaseStore +logger = logging.getLogger(__name__) + + # XXX: This feels like it should belong in a "models" module, not storage. class ApplicationService(object): """Defines an application service. @@ -30,7 +33,22 @@ class ApplicationService(object): if url: self.url = url if namespaces: - self.namespaces = namespaces + self._set_namespaces(namespaces) + + def _set_namespaces(self, namespaces): + # Sanity check that it is of the form: + # { + # users: ["regex",...], + # aliases: ["regex",...], + # rooms: ["regex",...], + # } + for ns in ["users", "rooms", "aliases"]: + if type(namespaces[ns]) != list: + raise ValueError("Bad namespace value for '%s'", ns) + for regex in namespaces[ns]: + if not isinstance(regex, basestring): + raise ValueError("Expected string regex for ns '%s'", ns) + self.namespaces = namespaces def is_interested(self, event): """Check if this service is interested in this event. 
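
The _populate_cache hunk that follows folds rows from the LEFT JOIN of application_services and application_services_regex back into one dict per token, appending each regex to the namespace its enum column selects. A standalone sketch of that fold, using the row shape documented in the hunk:

    NAMESPACES = ["users", "aliases", "rooms"]  # enum order 0, 1, 2

    def fold_rows(rows):
        services = {}
        for res in rows:
            svc = services.setdefault(res["token"], {
                "url": res["url"],
                "token": res["token"],
                "namespaces": {"users": [], "aliases": [], "rooms": []},
            })
            if res["namespace"] is not None:  # LEFT JOIN row with no regex
                ns = NAMESPACES[res["namespace"]]
                svc["namespaces"][ns].append(res["regex"])
        return services

    rows = [
        {"token": "t1", "url": "http://as.example", "namespace": 0, "regex": "@irc_.*"},
        {"token": "t1", "url": "http://as.example", "namespace": 2, "regex": "!irc_.*"},
    ]
    print(fold_rows(rows)["t1"]["namespaces"])
    # {'users': ['@irc_.*'], 'aliases': [], 'rooms': ['!irc_.*']}
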
@@ -133,15 +151,64 @@ class ApplicationServiceStore(SQLBaseStore): return service return None + # TODO: The from_cache=False impl # TODO: This should be JOINed with the application_services_regex table. - row = self._simple_select_one( - "application_services", {"token": token}, - ["url", "token"] - ) - if not row: - raise StoreError(400, "Bad application services token supplied.") - return row + + @defer.inlineCallbacks def _populate_cache(self): """Populates the ApplicationServiceCache from the database.""" - pass + sql = ("SELECT * FROM application_services LEFT JOIN " + "application_services_regex ON application_services.id = " + "application_services_regex.as_id") + + namespace_enum = [ + "users", # 0 + "aliases", # 1 + "rooms" # 2 + ] + # SQL results in the form: + # [ + # { + # 'regex': "something", + # 'url': "something", + # 'namespace': enum, + # 'as_id': 0, + # 'token': "something", + # 'id': 0 + # } + # ] + services = {} + results = yield self._execute_and_decode(sql) + for res in results: + as_token = res["token"] + if as_token not in services: + # add the service + services[as_token] = { + "url": res["url"], + "token": as_token, + "namespaces": { + "users": [], + "aliases": [], + "rooms": [] + } + } + # add the namespace regex if one exists + ns_int = res["namespace"] + if ns_int is None: + continue + try: + services[as_token]["namespaces"][namespace_enum[ns_int]].append( + res["regex"] + ) + except IndexError: + logger.error("Bad namespace enum '%s'. %s", ns_int, res) + + for service in services.values(): + logger.info("Found application service: %s", service) + self.cache.services.append(ApplicationService( + service["token"], + service["url"], + service["namespaces"] + )) + diff --git a/synapse/storage/schema/application_services.sql b/synapse/storage/schema/application_services.sql new file mode 100644 index 0000000000..6d245fc807 --- /dev/null +++ b/synapse/storage/schema/application_services.sql @@ -0,0 +1,32 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE IF NOT EXISTS application_services( + id INTEGER PRIMARY KEY AUTOINCREMENT, + url TEXT, + token TEXT, + UNIQUE(token) ON CONFLICT ROLLBACK +); + +CREATE TABLE IF NOT EXISTS application_services_regex( + id INTEGER PRIMARY KEY AUTOINCREMENT, + as_id INTEGER NOT NULL, + namespace INTEGER, /* enum[room_id|room_alias|user_id] */ + regex TEXT, + FOREIGN KEY(as_id) REFERENCES application_services(id) +); + + + -- cgit 1.5.1 From 0cbb6b0f5235e4501a0fb360e881d152644a17cd Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Jan 2015 14:44:41 +0000 Subject: Google doc style --- synapse/storage/_base.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 4f172d3967..809c81f47f 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -195,10 +195,11 @@ class SQLBaseStore(object): def _simple_upsert(self, table, keyvalues, values): """ - :param table: The table to upsert into - :param keyvalues: Dict of the unique key tables and their new values - :param values: Dict of all the nonunique columns and their new values - :return: A deferred + Args: + table (str): The table to upsert into + keyvalues (dict): The unique key tables and their new values + values (dict): The nonunique columns and their new values + Returns: A deferred """ return self.runInteraction( "_simple_upsert", -- cgit 1.5.1 From fb0928097a0dc1606aebb9aed8f070bcea304178 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Jan 2015 14:48:07 +0000 Subject: More magic commas (including the place I copied it from...) --- synapse/storage/_base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 809c81f47f..9261c999cb 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -210,8 +210,8 @@ class SQLBaseStore(object): # Try to update sql = "UPDATE %s SET %s WHERE %s" % ( table, - ", ".join("%s = ?" % (k) for k in values), - " AND ".join("%s = ?" % (k) for k in keyvalues) + ", ".join("%s = ?" % (k,) for k in values), + " AND ".join("%s = ?" % (k,) for k in keyvalues) ) sqlargs = values.values() + keyvalues.values() logger.debug( @@ -390,8 +390,8 @@ class SQLBaseStore(object): if updatevalues: update_sql = "UPDATE %s SET %s WHERE %s" % ( table, - ", ".join("%s = ?" % (k) for k in updatevalues), - " AND ".join("%s = ?" % (k) for k in keyvalues) + ", ".join("%s = ?" % (k,) for k in updatevalues), + " AND ".join("%s = ?" 
% (k,) for k in keyvalues) ) def func(txn): -- cgit 1.5.1 From 6d485dd1c727e7ecfe3991066bd058794ae05051 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Jan 2015 14:48:42 +0000 Subject: unnecessary newlines --- synapse/storage/_base.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 9261c999cb..4e8bd3faa9 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -237,8 +237,6 @@ class SQLBaseStore(object): ) txn.execute(sql, allvalues.values()) - - def _simple_select_one(self, table, keyvalues, retcols, allow_none=False): """Executes a SELECT query on the named table, which is expected to -- cgit 1.5.1 From 445ad9941ea2e4038846aa6fed456e3250ae49b1 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Jan 2015 14:49:59 +0000 Subject: Redundant parens --- synapse/storage/push_rule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index ca04f2ccee..f5a736be44 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -92,7 +92,7 @@ class PushRuleStore(SQLBaseStore): res = txn.fetchall() if not res: raise RuleNotFoundException("before/after rule not found: %s" % (relative_to_rule)) - (priority_class, base_rule_priority) = res[0] + priority_class, base_rule_priority = res[0] if 'priority_class' in kwargs and kwargs['priority_class'] != priority_class: raise InconsistentRuleException( -- cgit 1.5.1 From 93aac9bb7b3023e6c82961b1cdd655a48ec567fb Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Jan 2015 14:51:01 +0000 Subject: Newline --- synapse/storage/push_rule.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse/storage') diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index f5a736be44..48105234f6 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -184,6 +184,7 @@ class PushRuleStore(SQLBaseStore): } ) + class RuleNotFoundException(Exception): pass -- cgit 1.5.1 From e78dd332928c111c8a62985bce0a3c1c5631244e Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Jan 2015 14:52:58 +0000 Subject: Use %s instead of + --- synapse/storage/push_rule.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 48105234f6..0342996ed1 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -85,8 +85,8 @@ class PushRuleStore(SQLBaseStore): # get the priority of the rule we're inserting after/before sql = ( - "SELECT priority_class, priority FROM "+PushRuleTable.table_name+ - " WHERE user_name = ? and rule_id = ?" + "SELECT priority_class, priority FROM ? " + "WHERE user_name = ? and rule_id = ?" 
% (PushRuleTable.table_name,) ) txn.execute(sql, (user_name, relative_to_rule)) res = txn.fetchall() -- cgit 1.5.1 From 60b143a52e69751a406ea83cdab58f4045cdd9d4 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Jan 2015 15:48:28 +0000 Subject: Move pushers delta to v12 and bump schema version --- synapse/storage/__init__.py | 2 +- synapse/storage/schema/delta/v10.sql | 46 ------------------------------------ synapse/storage/schema/delta/v12.sql | 46 ++++++++++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 47 deletions(-) delete mode 100644 synapse/storage/schema/delta/v10.sql create mode 100644 synapse/storage/schema/delta/v12.sql (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 2534d109fd..277581b4e2 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -69,7 +69,7 @@ SCHEMAS = [ # Remember to update this number every time an incompatible change is made to # database schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 11 +SCHEMA_VERSION = 12 class _RollbackButIsFineException(Exception): diff --git a/synapse/storage/schema/delta/v10.sql b/synapse/storage/schema/delta/v10.sql deleted file mode 100644 index 8c4dfd5c1b..0000000000 --- a/synapse/storage/schema/delta/v10.sql +++ /dev/null @@ -1,46 +0,0 @@ -/* Copyright 2014 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ --- Push notification endpoints that users have configured -CREATE TABLE IF NOT EXISTS pushers ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_name TEXT NOT NULL, - instance_handle varchar(32) NOT NULL, - kind varchar(8) NOT NULL, - app_id varchar(64) NOT NULL, - app_display_name varchar(64) NOT NULL, - device_display_name varchar(128) NOT NULL, - pushkey blob NOT NULL, - ts BIGINT NOT NULL, - lang varchar(8), - data blob, - last_token TEXT, - last_success BIGINT, - failing_since BIGINT, - FOREIGN KEY(user_name) REFERENCES users(name), - UNIQUE (app_id, pushkey) -); - -CREATE TABLE IF NOT EXISTS push_rules ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_name TEXT NOT NULL, - rule_id TEXT NOT NULL, - priority_class TINYINT NOT NULL, - priority INTEGER NOT NULL DEFAULT 0, - conditions TEXT NOT NULL, - actions TEXT NOT NULL, - UNIQUE(user_name, rule_id) -); - -CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name); diff --git a/synapse/storage/schema/delta/v12.sql b/synapse/storage/schema/delta/v12.sql new file mode 100644 index 0000000000..8c4dfd5c1b --- /dev/null +++ b/synapse/storage/schema/delta/v12.sql @@ -0,0 +1,46 @@ +/* Copyright 2014 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +-- Push notification endpoints that users have configured +CREATE TABLE IF NOT EXISTS pushers ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + instance_handle varchar(32) NOT NULL, + kind varchar(8) NOT NULL, + app_id varchar(64) NOT NULL, + app_display_name varchar(64) NOT NULL, + device_display_name varchar(128) NOT NULL, + pushkey blob NOT NULL, + ts BIGINT NOT NULL, + lang varchar(8), + data blob, + last_token TEXT, + last_success BIGINT, + failing_since BIGINT, + FOREIGN KEY(user_name) REFERENCES users(name), + UNIQUE (app_id, pushkey) +); + +CREATE TABLE IF NOT EXISTS push_rules ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + rule_id TEXT NOT NULL, + priority_class TINYINT NOT NULL, + priority INTEGER NOT NULL DEFAULT 0, + conditions TEXT NOT NULL, + actions TEXT NOT NULL, + UNIQUE(user_name, rule_id) +); + +CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name); -- cgit 1.5.1 From 0ef5bfd6a9eaaae14e199997658b3d0006abd854 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 28 Jan 2015 16:16:53 +0000 Subject: Start implementing auth conflict res --- synapse/api/auth.py | 38 +++--- synapse/api/constants.py | 6 + synapse/federation/federation_client.py | 39 ++++++ synapse/handlers/federation.py | 211 ++++++++++++++++++++++++++------ synapse/storage/rejections.py | 10 ++ synapse/storage/schema/im.sql | 1 + 6 files changed, 253 insertions(+), 52 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index a342a0e0da..461faa8c78 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -353,9 +353,23 @@ class Auth(object): def add_auth_events(self, builder, context): yield run_on_reactor() - if builder.type == EventTypes.Create: - builder.auth_events = [] - return + auth_ids = self.compute_auth_events(builder, context) + + auth_events_entries = yield self.store.add_event_hashes( + auth_ids + ) + + builder.auth_events = auth_events_entries + + context.auth_events = { + k: v + for k, v in context.current_state.items() + if v.event_id in auth_ids + } + + def compute_auth_events(self, event, context): + if event.type == EventTypes.Create: + return [] auth_ids = [] @@ -368,7 +382,7 @@ class Auth(object): key = (EventTypes.JoinRules, "", ) join_rule_event = context.current_state.get(key) - key = (EventTypes.Member, builder.user_id, ) + key = (EventTypes.Member, event.user_id, ) member_event = context.current_state.get(key) key = (EventTypes.Create, "", ) @@ -382,8 +396,8 @@ class Auth(object): else: is_public = False - if builder.type == EventTypes.Member: - e_type = builder.content["membership"] + if event.type == EventTypes.Member: + e_type = event.content["membership"] if e_type in [Membership.JOIN, Membership.INVITE]: if join_rule_event: auth_ids.append(join_rule_event.event_id) @@ -398,17 +412,7 @@ class Auth(object): if member_event.content["membership"] == Membership.JOIN: auth_ids.append(member_event.event_id) - auth_events_entries = yield self.store.add_event_hashes( - auth_ids - ) - - builder.auth_events = auth_events_entries - - context.auth_events 
= { - k: v - for k, v in context.current_state.items() - if v.event_id in auth_ids - } + return auth_ids @log_function def _can_send_event(self, event, auth_events): diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 7ee6dcc46e..0d3fc629af 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -74,3 +74,9 @@ class EventTypes(object): Message = "m.room.message" Topic = "m.room.topic" Name = "m.room.name" + + +class RejectedReason(object): + AUTH_ERROR = "auth_error" + REPLACED = "replaced" + NOT_ANCESTOR = "not_ancestor" diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 91b44cd8b3..ebcd593506 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -331,6 +331,45 @@ class FederationClient(object): defer.returnValue(pdu) + @defer.inlineCallbacks + def query_auth(self, destination, room_id, event_id, local_auth): + """ + Params: + destination (str) + event_it (str) + local_auth (list) + """ + time_now = self._clock.time_msec() + + send_content = { + "auth_chain": [e.get_pdu_json(time_now) for e in local_auth], + } + + code, content = yield self.transport_layer.send_invite( + destination=destination, + room_id=room_id, + event_id=event_id, + content=send_content, + ) + + auth_chain = [ + (yield self._check_sigs_and_hash(self.event_from_pdu_json(e))) + for e in content["auth_chain"] + ] + + missing = [ + (yield self._check_sigs_and_hash(self.event_from_pdu_json(e))) + for e in content.get("missing", []) + ] + + ret = { + "auth_chain": auth_chain, + "rejects": content.get("rejects", []), + "missing": missing, + } + + defer.returnValue(ret) + def event_from_pdu_json(self, pdu_json, outlier=False): event = FrozenEvent( pdu_json diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index bcdcc90a18..97e3c503b9 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -17,19 +17,16 @@ from ._base import BaseHandler -from synapse.events.utils import prune_event from synapse.api.errors import ( - AuthError, FederationError, SynapseError, StoreError, + AuthError, FederationError, StoreError, ) -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventTypes, Membership, RejectedReason from synapse.util.logutils import log_function from synapse.util.async import run_on_reactor from synapse.crypto.event_signing import ( - compute_event_signature, check_event_content_hash, - add_hashes_and_signatures, + compute_event_signature, add_hashes_and_signatures, ) from synapse.types import UserID -from syutil.jsonutil import encode_canonical_json from twisted.internet import defer @@ -113,33 +110,6 @@ class FederationHandler(BaseHandler): logger.debug("Processing event: %s", event.event_id) - redacted_event = prune_event(event) - - redacted_pdu_json = redacted_event.get_pdu_json() - try: - yield self.keyring.verify_json_for_server( - event.origin, redacted_pdu_json - ) - except SynapseError as e: - logger.warn( - "Signature check failed for %s redacted to %s", - encode_canonical_json(pdu.get_pdu_json()), - encode_canonical_json(redacted_pdu_json), - ) - raise FederationError( - "ERROR", - e.code, - e.msg, - affected=event.event_id, - ) - - if not check_event_content_hash(event): - logger.warn( - "Event content has been tampered, redacting %s, %s", - event.event_id, encode_canonical_json(event.get_dict()) - ) - event = redacted_event - logger.debug("Event: %s", event) # FIXME (erikj): Awful hack 
to make the case where we are not currently @@ -180,7 +150,6 @@ class FederationHandler(BaseHandler): if state: for e in state: - logging.info("A :) %r", e) e.internal_metadata.outlier = True try: yield self._handle_new_event(e) @@ -747,7 +716,20 @@ class FederationHandler(BaseHandler): event.event_id, event.signatures, ) - self.auth.check(event, auth_events=context.auth_events) + try: + self.auth.check(event, auth_events=context.auth_events) + except AuthError: + # TODO: Store rejection. + context.rejected = RejectedReason.AUTH_ERROR + + yield self.store.persist_event( + event, + context=context, + backfilled=backfilled, + is_new_state=False, + current_state=current_state, + ) + raise logger.debug( "_handle_new_event: Before persist_event: %s, sigs: %s", @@ -768,3 +750,162 @@ class FederationHandler(BaseHandler): ) defer.returnValue(context) + + @defer.inlineCallbacks + def do_auth(self, origin, event, context): + for e_id, _ in event.auth_events: + pass + + auth_events = set(e_id for e_id, _ in event.auth_events) + current_state = set(e.event_id for e in context.auth_events.values()) + + missing_auth = auth_events - current_state + + if missing_auth: + # Do auth conflict res. + + # 1. Get what we think is the auth chain. + auth_ids = self.auth.compute_auth_events(event, context) + local_auth_chain = yield self.store.get_auth_chain(auth_ids) + + # 2. Get remote difference. + result = yield self.replication_layer.query_auth( + origin, + event.room_id, + event.event_id, + local_auth_chain, + ) + + # 3. Process any remote auth chain events we haven't seen. + for e in result.get("missing", []): + # TODO. + pass + + # 4. Look at rejects and their proofs. + # TODO. + + try: + self.auth.check(event, auth_events=context.auth_events) + except AuthError: + raise + + @defer.inlineCallbacks + def construct_auth_difference(self, local_auth, remote_auth): + """ Given a local and remote auth chain, find the differences. This + assumes that we have already processed all events in remote_auth + + Params: + local_auth (list) + remote_auth (list) + + Returns: + dict + """ + + # TODO: Make sure we are OK with local_auth or remote_auth having more + # auth events in them than strictly necessary. + + def sort_fun(ev): + return ev.depth, ev.event_id + + # We find the differences by starting at the "bottom" of each list + # and iterating up on both lists. The lists are ordered by depth and + # then event_id, we iterate up both lists until we find the event ids + # don't match. Then we look at depth/event_id to see which side is + # missing that event, and iterate only up that list. Repeat. 
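
A runnable sketch of the walk the comment above describes. Two caveats about the committed code below: Python iterators do not provide has_next(), so the get_next helper as written would raise AttributeError; and the guard "while current_local and current_remote" exits as soon as either side is exhausted, even though branches exist to drain the longer list. The sketch uses the two-argument next() built-in and loops while either side remains:

    import collections

    Ev = collections.namedtuple("Ev", ["depth", "event_id"])

    def auth_difference(local_auth, remote_auth):
        sort_fun = lambda ev: (ev.depth, ev.event_id)
        local_iter = iter(sorted(local_auth, key=sort_fun))
        remote_iter = iter(sorted(remote_auth, key=sort_fun))

        missing_locals, missing_remotes = [], []
        current_local = next(local_iter, None)
        current_remote = next(remote_iter, None)

        while current_local or current_remote:
            if current_remote is None:
                missing_locals.append(current_local)
                current_local = next(local_iter, None)
            elif current_local is None:
                missing_remotes.append(current_remote)
                current_remote = next(remote_iter, None)
            elif current_local.event_id == current_remote.event_id:
                current_local = next(local_iter, None)
                current_remote = next(remote_iter, None)
            elif sort_fun(current_local) < sort_fun(current_remote):
                missing_locals.append(current_local)
                current_local = next(local_iter, None)
            else:
                missing_remotes.append(current_remote)
                current_remote = next(remote_iter, None)
        return missing_locals, missing_remotes

    print(auth_difference([Ev(1, "$a"), Ev(2, "$b")],
                          [Ev(1, "$a"), Ev(2, "$c")]))
    # ([Ev(depth=2, event_id='$b')], [Ev(depth=2, event_id='$c')])
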
+ + remote_list = list(remote_auth) + remote_list.sort(key=sort_fun) + + local_list = list(local_auth) + local_list.sort(key=sort_fun) + + local_iter = iter(local_list) + remote_iter = iter(remote_list) + + current_local = local_iter.next() + current_remote = remote_iter.next() + + def get_next(it, opt=None): + return it.next() if it.has_next() else opt + + missing_remotes = [] + missing_locals = [] + while current_local and current_remote: + if current_remote is None: + missing_locals.append(current_local) + current_local = get_next(local_iter) + continue + + if current_local is None: + missing_remotes.append(current_remote) + current_remote = get_next(remote_iter) + continue + + if current_local.event_id == current_remote.event_id: + current_local = get_next(local_iter) + current_remote = get_next(remote_iter) + continue + + if current_local.depth < current_remote.depth: + missing_locals.append(current_local) + current_local = get_next(local_iter) + continue + + if current_local.depth > current_remote.depth: + missing_remotes.append(current_remote) + current_remote = get_next(remote_iter) + continue + + # They have the same depth, so we fall back to the event_id order + if current_local.event_id < current_remote.event_id: + missing_locals.append(current_local) + current_local = get_next(local_iter) + + if current_local.event_id > current_remote.event_id: + missing_remotes.append(current_remote) + current_remote = get_next(remote_iter) + continue + + # missing locals should be sent to the server + # We should find why we are missing remotes, as they will have been + # rejected. + + # Remove events from missing_remotes if they are referencing a missing + # remote. We only care about the "root" rejected ones. + missing_remote_ids = [e.event_id for e in missing_remotes] + base_remote_rejected = list(missing_remotes) + for e in missing_remotes: + for e_id, _ in e.auth_events: + if e_id in missing_remote_ids: + base_remote_rejected.remove(e) + + reason_map = {} + + for e in base_remote_rejected: + reason = yield self.store.get_rejection_reason(e.event_id) + if reason is None: + # FIXME: ERRR?! + raise RuntimeError("") + + reason_map[e.event_id] = reason + + if reason == RejectedReason.AUTH_ERROR: + pass + elif reason == RejectedReason.REPLACED: + # TODO: Get proof + pass + elif reason == RejectedReason.NOT_ANCESTOR: + # TODO: Get proof. 
+ pass + + defer.returnValue({ + "rejects": { + e.event_id: { + "reason": reason_map[e.event_id], + "proof": None, + } + for e in base_remote_rejected + }, + "missing": missing_locals, + }) diff --git a/synapse/storage/rejections.py b/synapse/storage/rejections.py index 7d38b31f44..b7249700d7 100644 --- a/synapse/storage/rejections.py +++ b/synapse/storage/rejections.py @@ -31,3 +31,13 @@ class RejectionsStore(SQLBaseStore): "last_failure": self._clock.time_msec(), } ) + + def get_rejection_reason(self, event_id): + self._simple_select_one_onecol( + table="rejections", + retcol="reason", + keyvalues={ + "event_id": event_id, + }, + allow_none=True, + ) diff --git a/synapse/storage/schema/im.sql b/synapse/storage/schema/im.sql index bc7c6b6ed5..5866a387f6 100644 --- a/synapse/storage/schema/im.sql +++ b/synapse/storage/schema/im.sql @@ -128,5 +128,6 @@ CREATE TABLE IF NOT EXISTS rejections( event_id TEXT NOT NULL, reason TEXT NOT NULL, last_check TEXT NOT NULL, + root_rejected TEXT, CONSTRAINT ev_id UNIQUE (event_id) ON CONFLICT REPLACE ); -- cgit 1.5.1 From c23e3db544eb940d95a092b661e3872480f3bf30 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Wed, 28 Jan 2015 16:45:18 +0000 Subject: Add filter JSON sanity checks. --- synapse/api/filtering.py | 109 +++++++++++++++++++++++++++++++-- synapse/rest/client/v2_alpha/filter.py | 2 +- synapse/storage/filtering.py | 4 +- tests/api/test_filtering.py | 24 ++++++-- 4 files changed, 128 insertions(+), 11 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 20b6951d47..6c7a73b6d5 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -13,7 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer +from synapse.api.errors import SynapseError +from synapse.types import UserID, RoomID class Filtering(object): @@ -25,10 +26,110 @@ class Filtering(object): def get_user_filter(self, user_localpart, filter_id): return self.store.get_user_filter(user_localpart, filter_id) - def add_user_filter(self, user_localpart, definition): - # TODO(paul): implement sanity checking of the definition - return self.store.add_user_filter(user_localpart, definition) + def add_user_filter(self, user_localpart, user_filter): + self._check_valid_filter(user_filter) + return self.store.add_user_filter(user_localpart, user_filter) # TODO(paul): surely we should probably add a delete_user_filter or # replace_user_filter at some point? There's no REST API specified for # them however + + def _check_valid_filter(self, user_filter): + """Check if the provided filter is valid. + + This inspects all definitions contained within the filter. + + Args: + user_filter(dict): The filter + Raises: + SynapseError: If the filter is not valid. + """ + # NB: Filters are the complete json blobs. "Definitions" are an + # individual top-level key e.g. public_user_data. Filters are made of + # many definitions. + + top_level_definitions = [ + "public_user_data", "private_user_data", "server_data" + ] + + room_level_definitions = [ + "state", "events", "ephemeral" + ] + + for key in top_level_definitions: + if key in user_filter: + self._check_definition(user_filter[key]) + + if "room" in user_filter: + for key in room_level_definitions: + if key in user_filter["room"]: + self._check_definition(user_filter["room"][key]) + + + def _check_definition(self, definition): + """Check if the provided definition is valid. 
+ + This inspects not only the types but also the values to make sure they + make sense. + + Args: + definition(dict): The filter definition + Raises: + SynapseError: If there was a problem with this definition. + """ + # NB: Filters are the complete json blobs. "Definitions" are an + # individual top-level key e.g. public_user_data. Filters are made of + # many definitions. + if type(definition) != dict: + raise SynapseError( + 400, "Expected JSON object, not %s" % (definition,) + ) + + # check rooms are valid room IDs + room_id_keys = ["rooms", "not_rooms"] + for key in room_id_keys: + if key in definition: + if type(definition[key]) != list: + raise SynapseError(400, "Expected %s to be a list." % key) + for room_id in definition[key]: + RoomID.from_string(room_id) + + # check senders are valid user IDs + user_id_keys = ["senders", "not_senders"] + for key in user_id_keys: + if key in definition: + if type(definition[key]) != list: + raise SynapseError(400, "Expected %s to be a list." % key) + for user_id in definition[key]: + UserID.from_string(user_id) + + # TODO: We don't limit event type values but we probably should... + # check types are valid event types + event_keys = ["types", "not_types"] + for key in event_keys: + if key in definition: + if type(definition[key]) != list: + raise SynapseError(400, "Expected %s to be a list." % key) + for event_type in definition[key]: + if not isinstance(event_type, basestring): + raise SynapseError(400, "Event type should be a string") + + try: + event_format = definition["format"] + if event_format not in ["federation", "events"]: + raise SynapseError(400, "Invalid format: %s" % (event_format,)) + except KeyError: + pass # format is optional + + try: + event_select_list = definition["select"] + for select_key in event_select_list: + if select_key not in ["event_id", "origin_server_ts", + "thread_id", "content", "content.body"]: + raise SynapseError(400, "Bad select: %s" % (select_key,)) + except KeyError: + pass # select is optional + + if ("bundle_updates" in definition and + type(definition["bundle_updates"]) != bool): + raise SynapseError(400, "Bad bundle_updates: expected bool.") diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index 09e44e8ae0..81a3e95155 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -93,7 +93,7 @@ class CreateFilterRestServlet(RestServlet): filter_id = yield self.filtering.add_user_filter( user_localpart=target_user.localpart, - definition=content, + user_filter=content, ) defer.returnValue((200, {"filter_id": str(filter_id)})) diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index e98eaf8032..bab68a9eef 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -39,8 +39,8 @@ class FilteringStore(SQLBaseStore): defer.returnValue(json.loads(def_json)) - def add_user_filter(self, user_localpart, definition): - def_json = json.dumps(definition) + def add_user_filter(self, user_localpart, user_filter): + def_json = json.dumps(user_filter) # Need an atomic transaction to SELECT the maximal ID so far then # INSERT a new one diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 149948374d..188fbfb91e 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -57,13 +57,21 @@ class FilteringTestCase(unittest.TestCase): @defer.inlineCallbacks def test_add_filter(self): + user_filter = { + "room": { + "state": { + "types": ["m.*"] + } + } + } + 
filter_id = yield self.filtering.add_user_filter( user_localpart=user_localpart, - definition={"type": ["m.*"]}, + user_filter=user_filter, ) self.assertEquals(filter_id, 0) - self.assertEquals({"type": ["m.*"]}, + self.assertEquals(user_filter, (yield self.datastore.get_user_filter( user_localpart=user_localpart, filter_id=0, @@ -72,9 +80,17 @@ class FilteringTestCase(unittest.TestCase): @defer.inlineCallbacks def test_get_filter(self): + user_filter = { + "room": { + "state": { + "types": ["m.*"] + } + } + } + filter_id = yield self.datastore.add_user_filter( user_localpart=user_localpart, - definition={"type": ["m.*"]}, + user_filter=user_filter, ) filter = yield self.filtering.get_user_filter( @@ -82,4 +98,4 @@ class FilteringTestCase(unittest.TestCase): filter_id=filter_id, ) - self.assertEquals(filter, {"type": ["m.*"]}) + self.assertEquals(filter, user_filter) -- cgit 1.5.1 From 388581e087a3658c1b70d2aa1d17a132953350ca Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 28 Jan 2015 16:58:23 +0000 Subject: Extract the id token of the token when authing users, include the token and device_id in the internal meta data for the event along with the transaction id when sending events --- synapse/api/auth.py | 8 ++-- synapse/handlers/message.py | 12 +++++- synapse/rest/client/v1/admin.py | 2 +- synapse/rest/client/v1/directory.py | 4 +- synapse/rest/client/v1/events.py | 4 +- synapse/rest/client/v1/initial_sync.py | 2 +- synapse/rest/client/v1/presence.py | 8 ++-- synapse/rest/client/v1/profile.py | 4 +- synapse/rest/client/v1/room.py | 64 +++++++++++++++++------------ synapse/rest/client/v1/voip.py | 2 +- synapse/rest/media/v0/content_repository.py | 2 +- synapse/rest/media/v1/upload_resource.py | 2 +- synapse/storage/registration.py | 3 +- synapse/types.py | 3 ++ tests/rest/client/v1/test_presence.py | 2 + tests/rest/client/v1/test_rooms.py | 7 ++++ tests/rest/client/v1/test_typing.py | 1 + tests/storage/test_registration.py | 10 ++++- 18 files changed, 92 insertions(+), 48 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 292e9e2a80..3959e06a8b 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -21,7 +21,7 @@ from synapse.api.constants import EventTypes, Membership, JoinRules from synapse.api.errors import AuthError, StoreError, Codes, SynapseError from synapse.util.logutils import log_function from synapse.util.async import run_on_reactor -from synapse.types import UserID +from synapse.types import UserID, ClientID import logging @@ -292,7 +292,7 @@ class Auth(object): Returns: Tuple of UserID and device string: User ID object of the user making the request - Device ID string of the device the user is using + Client ID object of the client instance the user is using Raises: AuthError if no user by that token exists or the token is invalid. 
""" @@ -302,6 +302,7 @@ class Auth(object): user_info = yield self.get_user_by_token(access_token) user = user_info["user"] device_id = user_info["device_id"] + token_id = user_info["token_id"] ip_addr = self.hs.get_ip_from_request(request) user_agent = request.requestHeaders.getRawHeaders( @@ -317,7 +318,7 @@ class Auth(object): user_agent=user_agent ) - defer.returnValue((user, device_id)) + defer.returnValue((user, ClientID(device_id, token_id))) except KeyError: raise AuthError(403, "Missing access token.") @@ -342,6 +343,7 @@ class Auth(object): "admin": bool(ret.get("admin", False)), "device_id": ret.get("device_id"), "user": UserID.from_string(ret.get("name")), + "token_id": ret.get("token_id", None), } defer.returnValue(user_info) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 9c3271fe88..6fbd2af4ab 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -114,7 +114,8 @@ class MessageHandler(BaseHandler): defer.returnValue(chunk) @defer.inlineCallbacks - def create_and_send_event(self, event_dict, ratelimit=True): + def create_and_send_event(self, event_dict, ratelimit=True, + client=None, txn_id=None): """ Given a dict from a client, create and handle a new event. Creates an FrozenEvent object, filling out auth_events, prev_events, @@ -148,6 +149,15 @@ class MessageHandler(BaseHandler): builder.content ) + if client is not None: + if client.token_id is not None: + builder.internal_metadata.token_id = client.token_id + if client.device_id is not None: + builder.internal_metadata.device_id = client.device_id + + if txn_id is not None: + builder.internal_metadata.txn_id = txn_id + event, context = yield self._create_new_client_event( builder=builder, ) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index 6cfce1a479..2ce754b028 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -31,7 +31,7 @@ class WhoisRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): target_user = UserID.from_string(user_id) - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) is_admin = yield self.auth.is_server_admin(auth_user) if not is_admin and target_user != auth_user: diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index ef853af411..8f65efec5f 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -45,7 +45,7 @@ class ClientDirectoryServer(ClientV1RestServlet): @defer.inlineCallbacks def on_PUT(self, request, room_alias): - user, device_id = yield self.auth.get_user_by_req(request) + user, client = yield self.auth.get_user_by_req(request) content = _parse_json(request) if not "room_id" in content: @@ -85,7 +85,7 @@ class ClientDirectoryServer(ClientV1RestServlet): @defer.inlineCallbacks def on_DELETE(self, request, room_alias): - user, device_id = yield self.auth.get_user_by_req(request) + user, client = yield self.auth.get_user_by_req(request) is_admin = yield self.auth.is_server_admin(user) if not is_admin: diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index e58ee46fcd..77b7c25a03 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -34,7 +34,7 @@ class EventStreamRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request): - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, 
client = yield self.auth.get_user_by_req(request) try: handler = self.handlers.event_stream_handler pagin_config = PaginationConfig.from_request(request) @@ -71,7 +71,7 @@ class EventRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, event_id): - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) handler = self.handlers.event_handler event = yield handler.get_event(auth_user, event_id) diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index 78d30abbf8..4a259bba64 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -25,7 +25,7 @@ class InitialSyncRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request): - user, device_id = yield self.auth.get_user_by_req(request) + user, client = yield self.auth.get_user_by_req(request) with_feedback = "feedback" in request.args as_client_event = "raw" not in request.args pagination_config = PaginationConfig.from_request(request) diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 74669274a7..7feb4aadb1 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -32,7 +32,7 @@ class PresenceStatusRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) user = UserID.from_string(user_id) state = yield self.handlers.presence_handler.get_state( @@ -42,7 +42,7 @@ class PresenceStatusRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_PUT(self, request, user_id): - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) user = UserID.from_string(user_id) state = {} @@ -77,7 +77,7 @@ class PresenceListRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) user = UserID.from_string(user_id) if not self.hs.is_mine(user): @@ -97,7 +97,7 @@ class PresenceListRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_POST(self, request, user_id): - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) user = UserID.from_string(user_id) if not self.hs.is_mine(user): diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index f04abb2c26..15d6f3fc6c 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -37,7 +37,7 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_PUT(self, request, user_id): - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) user = UserID.from_string(user_id) try: @@ -70,7 +70,7 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_PUT(self, request, user_id): - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) user = UserID.from_string(user_id) try: diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index c8c34b4801..410f19ccf6 100644 --- 
a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -62,7 +62,7 @@ class RoomCreateRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_POST(self, request): - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) room_config = self.get_room_config(request) info = yield self.make_room(room_config, auth_user, None) @@ -125,7 +125,7 @@ class RoomStateEventRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, room_id, event_type, state_key): - user, device_id = yield self.auth.get_user_by_req(request) + user, client = yield self.auth.get_user_by_req(request) msg_handler = self.handlers.message_handler data = yield msg_handler.get_room_data( @@ -142,8 +142,8 @@ class RoomStateEventRestServlet(ClientV1RestServlet): defer.returnValue((200, data.get_dict()["content"])) @defer.inlineCallbacks - def on_PUT(self, request, room_id, event_type, state_key): - user, device_id = yield self.auth.get_user_by_req(request) + def on_PUT(self, request, room_id, event_type, state_key, txn_id=None): + user, client = yield self.auth.get_user_by_req(request) content = _parse_json(request) @@ -158,7 +158,9 @@ class RoomStateEventRestServlet(ClientV1RestServlet): event_dict["state_key"] = state_key msg_handler = self.handlers.message_handler - yield msg_handler.create_and_send_event(event_dict) + yield msg_handler.create_and_send_event( + event_dict, client=client, txn_id=txn_id, + ) defer.returnValue((200, {})) @@ -172,8 +174,8 @@ class RoomSendEventRestServlet(ClientV1RestServlet): register_txn_path(self, PATTERN, http_server, with_get=True) @defer.inlineCallbacks - def on_POST(self, request, room_id, event_type): - user, device_id = yield self.auth.get_user_by_req(request) + def on_POST(self, request, room_id, event_type, txn_id=None): + user, client = yield self.auth.get_user_by_req(request) content = _parse_json(request) msg_handler = self.handlers.message_handler @@ -183,7 +185,9 @@ class RoomSendEventRestServlet(ClientV1RestServlet): "content": content, "room_id": room_id, "sender": user.to_string(), - } + }, + client=client, + txn_id=txn_id, ) defer.returnValue((200, {"event_id": event.event_id})) @@ -200,7 +204,7 @@ class RoomSendEventRestServlet(ClientV1RestServlet): except KeyError: pass - response = yield self.on_POST(request, room_id, event_type) + response = yield self.on_POST(request, room_id, event_type, txn_id) self.txns.store_client_transaction(request, txn_id, response) defer.returnValue(response) @@ -215,8 +219,8 @@ class JoinRoomAliasServlet(ClientV1RestServlet): register_txn_path(self, PATTERN, http_server) @defer.inlineCallbacks - def on_POST(self, request, room_identifier): - user, device_id = yield self.auth.get_user_by_req(request) + def on_POST(self, request, room_identifier, txn_id=None): + user, client = yield self.auth.get_user_by_req(request) # the identifier could be a room alias or a room id. 
Try one then the # other if it fails to parse, without swallowing other valid @@ -245,7 +249,9 @@ class JoinRoomAliasServlet(ClientV1RestServlet): "room_id": identifier.to_string(), "sender": user.to_string(), "state_key": user.to_string(), - } + }, + client=client, + txn_id=txn_id, ) defer.returnValue((200, {"room_id": identifier.to_string()})) @@ -259,7 +265,7 @@ class JoinRoomAliasServlet(ClientV1RestServlet): except KeyError: pass - response = yield self.on_POST(request, room_identifier) + response = yield self.on_POST(request, room_identifier, txn_id) self.txns.store_client_transaction(request, txn_id, response) defer.returnValue(response) @@ -283,7 +289,7 @@ class RoomMemberListRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, room_id): # TODO support Pagination stream API (limit/tokens) - user, device_id = yield self.auth.get_user_by_req(request) + user, client = yield self.auth.get_user_by_req(request) handler = self.handlers.room_member_handler members = yield handler.get_room_members_as_pagination_chunk( room_id=room_id, @@ -311,7 +317,7 @@ class RoomMessageListRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, room_id): - user, device_id = yield self.auth.get_user_by_req(request) + user, client = yield self.auth.get_user_by_req(request) pagination_config = PaginationConfig.from_request( request, default_limit=10, ) @@ -335,7 +341,7 @@ class RoomStateRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, room_id): - user, device_id = yield self.auth.get_user_by_req(request) + user, client = yield self.auth.get_user_by_req(request) handler = self.handlers.message_handler # Get all the current state for this room events = yield handler.get_state_events( @@ -351,7 +357,7 @@ class RoomInitialSyncRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, room_id): - user, device_id = yield self.auth.get_user_by_req(request) + user, client = yield self.auth.get_user_by_req(request) pagination_config = PaginationConfig.from_request(request) content = yield self.handlers.message_handler.room_initial_sync( room_id=room_id, @@ -395,8 +401,8 @@ class RoomMembershipRestServlet(ClientV1RestServlet): register_txn_path(self, PATTERN, http_server) @defer.inlineCallbacks - def on_POST(self, request, room_id, membership_action): - user, device_id = yield self.auth.get_user_by_req(request) + def on_POST(self, request, room_id, membership_action, txn_id=None): + user, client = yield self.auth.get_user_by_req(request) content = _parse_json(request) @@ -418,7 +424,9 @@ class RoomMembershipRestServlet(ClientV1RestServlet): "room_id": room_id, "sender": user.to_string(), "state_key": state_key, - } + }, + client=client, + txn_id=txn_id, ) defer.returnValue((200, {})) @@ -432,7 +440,9 @@ class RoomMembershipRestServlet(ClientV1RestServlet): except KeyError: pass - response = yield self.on_POST(request, room_id, membership_action) + response = yield self.on_POST( + request, room_id, membership_action, txn_id + ) self.txns.store_client_transaction(request, txn_id, response) defer.returnValue(response) @@ -444,8 +454,8 @@ class RoomRedactEventRestServlet(ClientV1RestServlet): register_txn_path(self, PATTERN, http_server) @defer.inlineCallbacks - def on_POST(self, request, room_id, event_id): - user, device_id = yield self.auth.get_user_by_req(request) + def on_POST(self, request, room_id, event_id, txn_id=None): + user, client = yield self.auth.get_user_by_req(request) content = 
_parse_json(request) msg_handler = self.handlers.message_handler @@ -456,7 +466,9 @@ class RoomRedactEventRestServlet(ClientV1RestServlet): "room_id": room_id, "sender": user.to_string(), "redacts": event_id, - } + }, + client=client, + txn_id=txn_id, ) defer.returnValue((200, {"event_id": event.event_id})) @@ -470,7 +482,7 @@ class RoomRedactEventRestServlet(ClientV1RestServlet): except KeyError: pass - response = yield self.on_POST(request, room_id, event_id) + response = yield self.on_POST(request, room_id, event_id, txn_id) self.txns.store_client_transaction(request, txn_id, response) defer.returnValue(response) @@ -483,7 +495,7 @@ class RoomTypingRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_PUT(self, request, room_id, user_id): - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) room_id = urllib.unquote(room_id) target_user = UserID.from_string(urllib.unquote(user_id)) diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 42d8e30bab..11d08fbced 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -28,7 +28,7 @@ class VoipRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request): - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) turnUris = self.hs.config.turn_uris turnSecret = self.hs.config.turn_shared_secret diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py index 311ab89edb..22e26e3cd5 100644 --- a/synapse/rest/media/v0/content_repository.py +++ b/synapse/rest/media/v0/content_repository.py @@ -66,7 +66,7 @@ class ContentRepoResource(resource.Resource): @defer.inlineCallbacks def map_request_to_name(self, request): # auth the user - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) # namespace all file uploads on the user prefix = base64.urlsafe_b64encode( diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 6bed8a8efa..b939a30e19 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -42,7 +42,7 @@ class UploadResource(BaseMediaResource): @defer.inlineCallbacks def _async_render_POST(self, request): try: - auth_user, device_id = yield self.auth.get_user_by_req(request) + auth_user, client = yield self.auth.get_user_by_req(request) # TODO: The checks here are a bit late. The content will have # already been uploaded to a tmp file at this point content_length = request.getHeader("Content-Length") diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 75dffa4db2..029b07cc66 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -122,7 +122,8 @@ class RegistrationStore(SQLBaseStore): def _query_for_auth(self, txn, token): sql = ( - "SELECT users.name, users.admin, access_tokens.device_id" + "SELECT users.name, users.admin," + " access_tokens.device_id, access_tokens.id as token_id" " FROM users" " INNER JOIN access_tokens on users.id = access_tokens.user_id" " WHERE token = ?" 
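To make the new return shape concrete, here is a minimal standalone sketch (illustrative only, not part of any patch above) of how the token_id selected by the _query_for_auth SQL ends up inside the ClientID pair that get_user_by_req() now hands back; the sample values are invented.

    # Illustrative sketch only: ClientID mirrors the namedtuple added in the
    # synapse/types.py diff below; the user_info dict is shaped like the one
    # built in Auth.get_user_by_token() earlier in this commit (values invented).
    from collections import namedtuple

    ClientID = namedtuple("ClientID", ("device_id", "token_id"))

    user_info = {
        "user": "@alice:example.com",
        "admin": False,
        "device_id": None,
        "token_id": 1,
    }

    client = ClientID(user_info["device_id"], user_info["token_id"])
    # Servlets receive (user, client) from get_user_by_req() and pass client
    # straight to create_and_send_event(..., client=client, txn_id=txn_id),
    # which copies token_id/device_id onto the event's internal_metadata.
    print client  # ClientID(device_id=None, token_id=1)
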
diff --git a/synapse/types.py b/synapse/types.py index faac729ff2..46dbab5374 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -119,3 +119,6 @@ class StreamToken( d = self._asdict() d[key] = new_value return StreamToken(**d) + + +ClientID = namedtuple("ClientID", ("device_id", "token_id")) diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py index a4f2abf213..f849120a3e 100644 --- a/tests/rest/client/v1/test_presence.py +++ b/tests/rest/client/v1/test_presence.py @@ -75,6 +75,7 @@ class PresenceStateTestCase(unittest.TestCase): "user": UserID.from_string(myid), "admin": False, "device_id": None, + "token_id": 1, } hs.get_auth().get_user_by_token = _get_user_by_token @@ -165,6 +166,7 @@ class PresenceListTestCase(unittest.TestCase): "user": UserID.from_string(myid), "admin": False, "device_id": None, + "token_id": 1, } hs.handlers.room_member_handler = Mock( diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 76ed550b75..81ead10e76 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -70,6 +70,7 @@ class RoomPermissionsTestCase(RestTestCase): "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, + "token_id": 1, } hs.get_auth().get_user_by_token = _get_user_by_token @@ -466,6 +467,7 @@ class RoomsMemberListTestCase(RestTestCase): "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, + "token_id": 1, } hs.get_auth().get_user_by_token = _get_user_by_token @@ -555,6 +557,7 @@ class RoomsCreateTestCase(RestTestCase): "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, + "token_id": 1, } hs.get_auth().get_user_by_token = _get_user_by_token @@ -657,6 +660,7 @@ class RoomTopicTestCase(RestTestCase): "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, + "token_id": 1, } hs.get_auth().get_user_by_token = _get_user_by_token @@ -773,6 +777,7 @@ class RoomMemberStateTestCase(RestTestCase): "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, + "token_id": 1, } hs.get_auth().get_user_by_token = _get_user_by_token @@ -909,6 +914,7 @@ class RoomMessagesTestCase(RestTestCase): "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, + "token_id": 1, } hs.get_auth().get_user_by_token = _get_user_by_token @@ -1013,6 +1019,7 @@ class RoomInitialSyncTestCase(RestTestCase): "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, + "token_id": 1, } hs.get_auth().get_user_by_token = _get_user_by_token diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py index c89b37d004..c5d5b06da3 100644 --- a/tests/rest/client/v1/test_typing.py +++ b/tests/rest/client/v1/test_typing.py @@ -73,6 +73,7 @@ class RoomTypingTestCase(RestTestCase): "user": UserID.from_string(self.auth_user_id), "admin": False, "device_id": None, + "token_id": 1, } hs.get_auth().get_user_by_token = _get_user_by_token diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 84bfde7568..6f8bea2f61 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -53,7 +53,10 @@ class RegistrationStoreTestCase(unittest.TestCase): ) self.assertEquals( - {"admin": 0, "device_id": None, "name": self.user_id}, + {"admin": 0, + "device_id": None, + "name": self.user_id, + "token_id": 1}, (yield self.store.get_user_by_token(self.tokens[0])) ) @@ -63,7 
+66,10 @@ class RegistrationStoreTestCase(unittest.TestCase): yield self.store.add_access_token_to_user(self.user_id, self.tokens[1]) self.assertEquals( - {"admin": 0, "device_id": None, "name": self.user_id}, + {"admin": 0, + "device_id": None, + "name": self.user_id, + "token_id": 2}, (yield self.store.get_user_by_token(self.tokens[1])) ) -- cgit 1.5.1 From 11634017f47779d784325da5513517ad76b0dbc1 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Wed, 28 Jan 2015 17:42:19 +0000 Subject: s/definition/filter_json/ since definition is now used to mean a component of the filter, rather than the complete json --- synapse/storage/filtering.py | 4 ++-- synapse/storage/schema/filtering.sql | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index bab68a9eef..cb01c2040f 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -33,7 +33,7 @@ class FilteringStore(SQLBaseStore): "user_id": user_localpart, "filter_id": filter_id, }, - retcol="definition", + retcol="filter_json", allow_none=False, ) @@ -57,7 +57,7 @@ class FilteringStore(SQLBaseStore): filter_id = max_id + 1 sql = ( - "INSERT INTO user_filters (user_id, filter_id, definition)" + "INSERT INTO user_filters (user_id, filter_id, filter_json)" "VALUES(?, ?, ?)" ) txn.execute(sql, (user_localpart, filter_id, def_json)) diff --git a/synapse/storage/schema/filtering.sql b/synapse/storage/schema/filtering.sql index 795aca4afd..beb39ca201 100644 --- a/synapse/storage/schema/filtering.sql +++ b/synapse/storage/schema/filtering.sql @@ -15,7 +15,7 @@ CREATE TABLE IF NOT EXISTS user_filters( user_id TEXT, filter_id INTEGER, - definition TEXT, + filter_json TEXT, FOREIGN KEY(user_id) REFERENCES users(id) ); -- cgit 1.5.1 From d5bdf3c0c7958e6a080f9ec4b38a51428717d02a Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Jan 2015 18:06:04 +0000 Subject: Allow the push rule delete method to take more specifiers. --- synapse/storage/push_rule.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 0342996ed1..c7b553292e 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -175,14 +175,17 @@ class PushRuleStore(SQLBaseStore): txn.execute(sql, new_rule.values()) @defer.inlineCallbacks - def delete_push_rule(self, user_name, rule_id): - yield self._simple_delete_one( - PushRuleTable.table_name, - { - 'user_name': user_name, - 'rule_id': rule_id - } - ) + def delete_push_rule(self, user_name, rule_id, **kwargs): + """ + Delete a push rule. 
Args specify the row to be deleted and can be + any of the columns in the push_rule table, but below are the + standard ones + + Args: + user_name (str): The matrix ID of the push rule owner + rule_id (str): The rule_id of the rule to be deleted + """ + yield self._simple_delete_one(PushRuleTable.table_name, kwargs) class RuleNotFoundException(Exception): -- cgit 1.5.1 From b0b80074e04dcb2b70dff56b0368060a19c065d3 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 29 Jan 2015 01:48:36 +0000 Subject: SYN-252: Supply the stream and topological parts in the correct order to the constructor --- synapse/storage/stream.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 8ac2adab05..062ca06fb3 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -82,10 +82,10 @@ class _StreamToken(namedtuple("_StreamToken", "topological stream")): def parse(cls, string): try: if string[0] == 's': - return cls(None, int(string[1:])) + return cls(topological=None, stream=int(string[1:])) if string[0] == 't': parts = string[1:].split('-', 1) - return cls(int(parts[1]), int(parts[0])) + return cls(topological=int(parts[0]), stream=int(parts[1])) except: pass raise SynapseError(400, "Invalid token %r" % (string,)) @@ -94,7 +94,7 @@ class _StreamToken(namedtuple("_StreamToken", "topological stream")): def parse_stream_token(cls, string): try: if string[0] == 's': - return cls(None, int(string[1:])) + return cls(topological=None, stream=int(string[1:])) except: pass raise SynapseError(400, "Invalid token %r" % (string,)) -- cgit 1.5.1 From 3773759c0f1e25e6905e23368f770da99ceb3ea0 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Thu, 29 Jan 2015 09:15:33 +0000 Subject: Also edit the filter column on the delta SQL --- synapse/storage/schema/delta/v12.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/schema/delta/v12.sql b/synapse/storage/schema/delta/v12.sql index 795aca4afd..beb39ca201 100644 --- a/synapse/storage/schema/delta/v12.sql +++ b/synapse/storage/schema/delta/v12.sql @@ -15,7 +15,7 @@ CREATE TABLE IF NOT EXISTS user_filters( user_id TEXT, filter_id INTEGER, - definition TEXT, + filter_json TEXT, FOREIGN KEY(user_id) REFERENCES users(id) ); -- cgit 1.5.1 From e016f4043b81ffdedf71c4459772f66757386e44 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 29 Jan 2015 14:40:28 +0000 Subject: Use get_room_events_stream to get changes to the rooms if the number of changes is small --- synapse/handlers/sync.py | 56 +++++++++++++++++++++++++++++++++++++---------- synapse/storage/stream.py | 7 ++++++ 2 files changed, 52 insertions(+), 11 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9e1188da56..e93dfe005d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -216,23 +216,57 @@ class SyncHandler(BaseHandler): typing_by_room = {event["room_id"]: event for event in typing} logger.debug("Typing %r", typing_by_room) - room_list = yield self.store.get_rooms_for_user_where_membership_is( - user_id=sync_config.user.to_string(), - membership_list=[Membership.INVITE, Membership.JOIN] - ) + rm_handler = self.hs.get_handlers().room_member_handler + room_ids = yield rm_handler.get_rooms_for_user(sync_config.user) # TODO (mjark): Does public mean "published"? 
published_rooms = yield self.store.get_rooms(is_public=True) published_room_ids = set(r["room_id"] for r in published_rooms) + room_events, _ = yield self.store.get_room_events_stream( + sync_config.user.to_string(), + from_key=since_token.room_key, + to_key=now_token.room_key, + room_id=None, + limit=sync_config.limit + 1, + ) + rooms = [] - for event in room_list: - room_sync = yield self.incremental_sync_with_gap_for_room( - event.room_id, sync_config, since_token, now_token, - published_room_ids, typing_by_room - ) - if room_sync: - rooms.append(room_sync) + if len(room_events) <= sync_config.limit: + # There is no gap in any of the rooms. Therefore we can just + # partition the new events by room and return them. + events_by_room_id = {} + for event in room_events: + events_by_room_id.setdefault(event.room_id, []).append(event) + + for room_id in room_ids: + recents = events_by_room_id.get(room_id, []) + state = [event for event in recents if event.is_state()] + if recents: + prev_batch = now_token.copy_and_replace( + "room_key", recents[0].internal_metadata.before + ) + else: + prev_batch = now_token + room_sync = RoomSyncResult( + room_id=room_id, + published=room_id in published_room_ids, + events=recents, + prev_batch=prev_batch, + state=state, + limited=False, + typing=typing_by_room.get(room_id, None) + ) + if room_sync is not None: + rooms.append(room_sync) + else: + for room_id in room_ids: + room_sync = yield self.incremental_sync_with_gap_for_room( + room_id, sync_config, since_token, now_token, + published_room_ids, typing_by_room + ) + if room_sync: + rooms.append(room_sync) defer.returnValue(SyncResult( public_user_data=presence, diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index db1816ea84..93ccfd8c10 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -181,6 +181,13 @@ class StreamStore(SQLBaseStore): get_prev_content=True ) + for event, row in zip(ret, rows): + stream = row["stream_ordering"] + topo = event.depth + internal = event.internal_metadata + internal.before = str(_StreamToken(topo, stream - 1)) + internal.after = str(_StreamToken(topo, stream)) + if rows: key = "s%d" % max([r["stream_ordering"] for r in rows]) else: -- cgit 1.5.1 From e4f50fa0aa3426a272b1526072c4c42802989ba4 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 29 Jan 2015 14:53:18 +0000 Subject: Move bump schema delta --- synapse/storage/schema/delta/v12.sql | 24 ------------------------ synapse/storage/schema/delta/v13.sql | 24 ++++++++++++++++++++++++ 2 files changed, 24 insertions(+), 24 deletions(-) delete mode 100644 synapse/storage/schema/delta/v12.sql create mode 100644 synapse/storage/schema/delta/v13.sql (limited to 'synapse/storage') diff --git a/synapse/storage/schema/delta/v12.sql b/synapse/storage/schema/delta/v12.sql deleted file mode 100644 index beb39ca201..0000000000 --- a/synapse/storage/schema/delta/v12.sql +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -CREATE TABLE IF NOT EXISTS user_filters( - user_id TEXT, - filter_id INTEGER, - filter_json TEXT, - FOREIGN KEY(user_id) REFERENCES users(id) -); - -CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters( - user_id, filter_id -); diff --git a/synapse/storage/schema/delta/v13.sql b/synapse/storage/schema/delta/v13.sql new file mode 100644 index 0000000000..beb39ca201 --- /dev/null +++ b/synapse/storage/schema/delta/v13.sql @@ -0,0 +1,24 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +CREATE TABLE IF NOT EXISTS user_filters( + user_id TEXT, + filter_id INTEGER, + filter_json TEXT, + FOREIGN KEY(user_id) REFERENCES users(id) +); + +CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters( + user_id, filter_id +); -- cgit 1.5.1 From acb68a39e02f405c116135400e33a3b1940a07f8 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 29 Jan 2015 16:10:35 +0000 Subject: Code style fixes. --- synapse/api/errors.py | 1 + synapse/push/__init__.py | 15 +++++++-------- synapse/push/httppusher.py | 8 ++++---- synapse/push/pusherpool.py | 2 +- synapse/rest/__init__.py | 2 +- synapse/rest/client/v1/push_rule.py | 29 ++++++++++++++++++++++------- synapse/storage/push_rule.py | 9 +++++---- synapse/storage/pusher.py | 2 +- 8 files changed, 42 insertions(+), 26 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 5872e82d0f..ad478aa6b7 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -111,6 +111,7 @@ class NotFoundError(SynapseError): **kwargs ) + class AuthError(SynapseError): """An error raised when there was a problem authorising an event.""" diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index fa967c5a5d..472ede5480 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -189,8 +189,8 @@ class Pusher(object): # for sanity, we only remove the pushkey if it # was the one we actually sent... logger.warn( - ("Ignoring rejected pushkey %s because we " - "didn't send it"), pk + ("Ignoring rejected pushkey %s because we" + " didn't send it"), pk ) else: logger.info( @@ -236,8 +236,7 @@ class Pusher(object): # of old notifications. 
logger.warn("Giving up on a notification to user %s, " "pushkey %s", - self.user_name, self.pushkey - ) + self.user_name, self.pushkey) self.backoff_delay = Pusher.INITIAL_BACKOFF self.last_token = chunk['end'] self.store.update_pusher_last_token( @@ -258,8 +257,7 @@ class Pusher(object): "Trying again in %dms", self.user_name, self.clock.time_msec() - self.failing_since, - self.backoff_delay - ) + self.backoff_delay) yield synapse.util.async.sleep(self.backoff_delay / 1000.0) self.backoff_delay *= 2 if self.backoff_delay > Pusher.MAX_BACKOFF: @@ -299,7 +297,6 @@ class Pusher(object): self.has_unread = False - def _value_for_dotted_key(dotted_key, event): parts = dotted_key.split(".") val = event @@ -310,6 +307,7 @@ def _value_for_dotted_key(dotted_key, event): parts = parts[1:] return val + def _tweaks_for_actions(actions): tweaks = {} for a in actions: @@ -319,6 +317,7 @@ def _tweaks_for_actions(actions): tweaks['sound'] = a['set_sound'] return tweaks + class PusherConfigException(Exception): def __init__(self, msg): - super(PusherConfigException, self).__init__(msg) \ No newline at end of file + super(PusherConfigException, self).__init__(msg) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index e12b946727..ab128e31e5 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -71,11 +71,11 @@ class HttpPusher(Pusher): # we may have to fetch this over federation and we # can't trust it anyway: is it worth it? #'from_display_name': 'Steve Stevington' - 'counts': { #-- we don't mark messages as read yet so - # we have no way of knowing + 'counts': { # -- we don't mark messages as read yet so + # we have no way of knowing # Just set the badge to 1 until we have read receipts 'unread': 1, - # 'missed_calls': 2 + # 'missed_calls': 2 }, 'devices': [ { @@ -142,4 +142,4 @@ class HttpPusher(Pusher): rejected = [] if 'rejected' in resp: rejected = resp['rejected'] - defer.returnValue(rejected) \ No newline at end of file + defer.returnValue(rejected) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 856defedac..4892c21e7b 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -149,4 +149,4 @@ class PusherPool: logger.info("Stopping pusher %s", fullid) self.pushers[fullid].stop() del self.pushers[fullid] - yield self.store.delete_pusher_by_app_id_pushkey(app_id, pushkey) \ No newline at end of file + yield self.store.delete_pusher_by_app_id_pushkey(app_id, pushkey) diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 90afd93333..1a84d94cd9 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -11,4 +11,4 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and -# limitations under the License. \ No newline at end of file +# limitations under the License. 
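As a quick illustration of the _tweaks_for_actions() helper from the synapse/push/__init__.py hunk above (a standalone sketch, not part of the patch; the sample action list is invented, though it matches the shape used in baserules): plain string actions such as 'notify' are ignored by this helper, while dict actions contribute their tweak to the dict sent along with the notification.

    # Standalone copy of _tweaks_for_actions() from the hunk above, plus an
    # invented sample input showing the expected output.
    def _tweaks_for_actions(actions):
        tweaks = {}
        for a in actions:
            if type(a) == dict:
                if 'set_sound' in a:
                    tweaks['sound'] = a['set_sound']
        return tweaks

    print _tweaks_for_actions(['notify', {'set_sound': 'default'}])
    # prints: {'sound': 'default'}
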
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 64743a2f46..2b1e930326 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -30,9 +30,9 @@ class PushRuleRestServlet(ClientV1RestServlet): 'sender': 1, 'room': 2, 'content': 3, - 'override': 4 + 'override': 4, } - PRIORITY_CLASS_INVERSE_MAP = {v: k for k,v in PRIORITY_CLASS_MAP.items()} + PRIORITY_CLASS_INVERSE_MAP = {v: k for k, v in PRIORITY_CLASS_MAP.items()} SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR = ( "Unrecognised request: You probably wanted a trailing slash") @@ -260,7 +260,9 @@ class PushRuleRestServlet(ClientV1RestServlet): if path == []: # we're a reference impl: pedantry is our job. - raise UnrecognizedRequestError(PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR) + raise UnrecognizedRequestError( + PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR + ) if path[0] == '': defer.returnValue((200, rules)) @@ -271,7 +273,9 @@ class PushRuleRestServlet(ClientV1RestServlet): elif path[0] == 'device': path = path[1:] if path == []: - raise UnrecognizedRequestError(PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR) + raise UnrecognizedRequestError( + PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR + ) if path[0] == '': defer.returnValue((200, rules['device'])) @@ -290,11 +294,13 @@ class PushRuleRestServlet(ClientV1RestServlet): def on_OPTIONS(self, _): return 200, {} + def _add_empty_priority_class_arrays(d): for pc in PushRuleRestServlet.PRIORITY_CLASS_MAP.keys(): d[pc] = [] return d + def _instance_handle_from_conditions(conditions): """ Given a list of conditions, return the instance handle of the @@ -305,9 +311,12 @@ def _instance_handle_from_conditions(conditions): return c['instance_handle'] return None + def _filter_ruleset_with_path(ruleset, path): if path == []: - raise UnrecognizedRequestError(PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR) + raise UnrecognizedRequestError( + PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR + ) if path[0] == '': return ruleset @@ -316,7 +325,9 @@ def _filter_ruleset_with_path(ruleset, path): raise UnrecognizedRequestError() path = path[1:] if path == []: - raise UnrecognizedRequestError(PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR) + raise UnrecognizedRequestError( + PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR + ) if path[0] == '': return ruleset[template_kind] rule_id = path[0] @@ -325,6 +336,7 @@ def _filter_ruleset_with_path(ruleset, path): return r raise NotFoundError + def _priority_class_from_spec(spec): if spec['template'] not in PushRuleRestServlet.PRIORITY_CLASS_MAP.keys(): raise InvalidRuleException("Unknown template: %s" % (spec['kind'])) @@ -335,6 +347,7 @@ def _priority_class_from_spec(spec): return pc + def _priority_class_to_template_name(pc): if pc > PushRuleRestServlet.PRIORITY_CLASS_MAP['override']: # per-device @@ -343,6 +356,7 @@ def _priority_class_to_template_name(pc): else: return PushRuleRestServlet.PRIORITY_CLASS_INVERSE_MAP[pc] + def _rule_to_template(rule): template_name = _priority_class_to_template_name(rule['priority_class']) if template_name in ['override', 'underride']: @@ -359,8 +373,9 @@ def _rule_to_template(rule): ret["pattern"] = thecond["pattern"] return ret + def _strip_device_condition(rule): - for i,c in enumerate(rule['conditions']): + for i, c in enumerate(rule['conditions']): if c['kind'] == 'device': del rule['conditions'][i] return rule diff --git a/synapse/storage/push_rule.py 
b/synapse/storage/push_rule.py index c7b553292e..27502d2399 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -117,7 +117,7 @@ class PushRuleStore(SQLBaseStore): new_rule['priority'] = new_rule_priority sql = ( - "SELECT COUNT(*) FROM "+PushRuleTable.table_name+ + "SELECT COUNT(*) FROM " + PushRuleTable.table_name + " WHERE user_name = ? AND priority_class = ? AND priority = ?" ) txn.execute(sql, (user_name, priority_class, new_rule_priority)) @@ -146,10 +146,11 @@ class PushRuleStore(SQLBaseStore): txn.execute(sql, new_rule.values()) - def _add_push_rule_highest_priority_txn(self, txn, user_name, priority_class, **kwargs): + def _add_push_rule_highest_priority_txn(self, txn, user_name, + priority_class, **kwargs): # find the highest priority rule in that class sql = ( - "SELECT COUNT(*), MAX(priority) FROM "+PushRuleTable.table_name+ + "SELECT COUNT(*), MAX(priority) FROM " + PushRuleTable.table_name + " WHERE user_name = ? and priority_class = ?" ) txn.execute(sql, (user_name, priority_class)) @@ -209,4 +210,4 @@ class PushRuleTable(Table): "actions", ] - EntryType = collections.namedtuple("PushRuleEntry", fields) \ No newline at end of file + EntryType = collections.namedtuple("PushRuleEntry", fields) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 113cdc8a8e..f253c9e2c3 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -170,4 +170,4 @@ class PushersTable(Table): "failing_since" ] - EntryType = collections.namedtuple("PusherEntry", fields) \ No newline at end of file + EntryType = collections.namedtuple("PusherEntry", fields) -- cgit 1.5.1 From 78015948a7febb18e000651f72f8f58830a55b93 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 29 Jan 2015 16:50:23 +0000 Subject: Initial implementation of auth conflict resolution --- synapse/events/utils.py | 6 +- synapse/federation/federation_client.py | 2 +- synapse/federation/federation_server.py | 33 +++++ synapse/federation/transport/client.py | 16 +++ synapse/federation/transport/server.py | 21 +++- synapse/handlers/federation.py | 207 ++++++++++++++++++++------------ synapse/storage/rejections.py | 4 +- tests/handlers/test_federation.py | 2 + 8 files changed, 210 insertions(+), 81 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/events/utils.py b/synapse/events/utils.py index bcb5457278..10a6b9f264 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -45,12 +45,14 @@ def prune_event(event): "membership", ] + event_dict = event.get_dict() + new_content = {} def add_fields(*fields): for field in fields: if field in event.content: - new_content[field] = event.content[field] + new_content[field] = event_dict["content"][field] if event_type == EventTypes.Member: add_fields("membership") @@ -75,7 +77,7 @@ def prune_event(event): allowed_fields = { k: v - for k, v in event.get_dict().items() + for k, v in event_dict.items() if k in allowed_keys } diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index ebcd593506..1173ca817b 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -345,7 +345,7 @@ class FederationClient(object): "auth_chain": [e.get_pdu_json(time_now) for e in local_auth], } - code, content = yield self.transport_layer.send_invite( + code, content = yield self.transport_layer.send_query_auth( destination=destination, room_id=room_id, event_id=event_id, diff --git a/synapse/federation/federation_server.py 
b/synapse/federation/federation_server.py index fc5342afaa..8cff4e6472 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -230,6 +230,39 @@ class FederationServer(object): "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus], })) + @defer.inlineCallbacks + def on_query_auth_request(self, origin, content, event_id): + auth_chain = [ + (yield self._check_sigs_and_hash(self.event_from_pdu_json(e))) + for e in content["auth_chain"] + ] + + missing = [ + (yield self._check_sigs_and_hash(self.event_from_pdu_json(e))) + for e in content.get("missing", []) + ] + + ret = yield self.handler.on_query_auth( + origin, event_id, auth_chain, content.get("rejects", []), missing + ) + + time_now = self._clock.time_msec() + send_content = { + "auth_chain": [ + e.get_pdu_json(time_now) + for e in ret["auth_chain"] + ], + "rejects": content.get("rejects", []), + "missing": [ + e.get_pdu_json(time_now) + for e in ret.get("missing", []) + ], + } + + defer.returnValue( + (200, send_content) + ) + @log_function def _get_persisted_pdu(self, origin, event_id, do_auth=True): """ Get a PDU from the database with given origin and id. diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index e634a3a213..4cb1dea2de 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -213,3 +213,19 @@ class TransportLayerClient(object): ) defer.returnValue(response) + + @defer.inlineCallbacks + @log_function + def send_query_auth(self, destination, room_id, event_id, content): + path = PREFIX + "/query_auth/%s/%s" % (room_id, event_id) + + code, content = yield self.client.post_json( + destination=destination, + path=path, + data=content, + ) + + if not 200 <= code < 300: + raise RuntimeError("Got %d from send_query_auth" % (code,)) + + defer.returnValue(json.loads(content)) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index a380a6910b..9c9f8d525b 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -42,7 +42,7 @@ class TransportLayerServer(object): content = None origin = None - if request.method == "PUT": + if request.method in ["PUT", "POST"]: # TODO: Handle other method types? other content types? 
try: content_bytes = request.content.read() @@ -234,6 +234,16 @@ class TransportLayerServer(object): ) ) ) + self.server.register_path( + "POST", + re.compile("^" + PREFIX + "/query_auth/([^/]*)/([^/]*)$"), + self._with_authentication( + lambda origin, content, query, context, event_id: + self._on_query_auth_request( + origin, content, event_id, + ) + ) + ) @defer.inlineCallbacks @log_function @@ -325,3 +335,12 @@ class TransportLayerServer(object): ) defer.returnValue((200, content)) + + @defer.inlineCallbacks + @log_function + def _on_query_auth_request(self, origin, content, event_id): + new_content = yield self.request_handler.on_query_auth_request( + origin, content, event_id + ) + + defer.returnValue((200, new_content)) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 97e3c503b9..14c26d8cea 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -126,7 +126,7 @@ class FederationHandler(BaseHandler): if not state: state, auth_chain = yield replication.get_state_for_room( - origin, context=event.room_id, event_id=event.event_id, + origin, room_id=event.room_id, event_id=event.event_id, ) if not auth_chain: @@ -139,7 +139,7 @@ class FederationHandler(BaseHandler): for e in auth_chain: e.internal_metadata.outlier = True try: - yield self._handle_new_event(e, fetch_auth_from=origin) + yield self._handle_new_event(origin, e) except: logger.exception( "Failed to handle auth event %s", @@ -152,7 +152,7 @@ class FederationHandler(BaseHandler): for e in state: e.internal_metadata.outlier = True try: - yield self._handle_new_event(e) + yield self._handle_new_event(origin, e) except: logger.exception( "Failed to handle state event %s", @@ -161,6 +161,7 @@ class FederationHandler(BaseHandler): try: yield self._handle_new_event( + origin, event, state=state, backfilled=backfilled, @@ -363,7 +364,14 @@ class FederationHandler(BaseHandler): for e in auth_chain: e.internal_metadata.outlier = True try: - yield self._handle_new_event(e) + auth_ids = [e_id for e_id, _ in e.auth_events] + auth = { + (e.type, e.state_key): e for e in auth_chain + if e.event_id in auth_ids + } + yield self._handle_new_event( + target_host, e, auth_events=auth + ) except: logger.exception( "Failed to handle auth event %s", @@ -374,8 +382,13 @@ class FederationHandler(BaseHandler): # FIXME: Auth these. 
e.internal_metadata.outlier = True try: + auth_ids = [e_id for e_id, _ in e.auth_events] + auth = { + (e.type, e.state_key): e for e in auth_chain + if e.event_id in auth_ids + } yield self._handle_new_event( - e, fetch_auth_from=target_host + target_host, e, auth_events=auth ) except: logger.exception( @@ -384,6 +397,7 @@ class FederationHandler(BaseHandler): ) yield self._handle_new_event( + target_host, new_event, state=state, current_state=state, @@ -450,7 +464,7 @@ class FederationHandler(BaseHandler): event.internal_metadata.outlier = False - context = yield self._handle_new_event(event) + context = yield self._handle_new_event(origin, event) logger.debug( "on_send_join_request: After _handle_new_event: %s, sigs: %s", @@ -651,11 +665,12 @@ class FederationHandler(BaseHandler): waiters.pop().callback(None) @defer.inlineCallbacks - def _handle_new_event(self, event, state=None, backfilled=False, - current_state=None, fetch_auth_from=None): + @log_function + def _handle_new_event(self, origin, event, state=None, backfilled=False, + current_state=None, auth_events=None): logger.debug( - "_handle_new_event: Before annotate: %s, sigs: %s", + "_handle_new_event: %s, sigs: %s", event.event_id, event.signatures, ) @@ -663,62 +678,34 @@ class FederationHandler(BaseHandler): event, old_state=state ) + if not auth_events: + auth_events = context.auth_events + logger.debug( - "_handle_new_event: Before auth fetch: %s, sigs: %s", - event.event_id, event.signatures, + "_handle_new_event: %s, auth_events: %s", + event.event_id, auth_events, ) is_new_state = not event.internal_metadata.is_outlier() - known_ids = set( - [s.event_id for s in context.auth_events.values()] - ) - - for e_id, _ in event.auth_events: - if e_id not in known_ids: - e = yield self.store.get_event(e_id, allow_none=True) - - if not e and fetch_auth_from is not None: - # Grab the auth_chain over federation if we are missing - # auth events. - auth_chain = yield self.replication_layer.get_event_auth( - fetch_auth_from, event.event_id, event.room_id - ) - for auth_event in auth_chain: - yield self._handle_new_event(auth_event) - e = yield self.store.get_event(e_id, allow_none=True) - - if not e: - # TODO: Do some conflict res to make sure that we're - # not the ones who are wrong. - logger.info( - "Rejecting %s as %s not in db or %s", - event.event_id, e_id, known_ids, - ) - # FIXME: How does raising AuthError work with federation? - raise AuthError(403, "Cannot find auth event") - - context.auth_events[(e.type, e.state_key)] = e - - logger.debug( - "_handle_new_event: Before hack: %s, sigs: %s", - event.event_id, event.signatures, - ) - + # This is a hack to fix some old rooms where the initial join event + # didn't reference the create event in its auth events. if event.type == EventTypes.Member and not event.auth_events: if len(event.prev_events) == 1: c = yield self.store.get_event(event.prev_events[0][0]) if c.type == EventTypes.Create: - context.auth_events[(c.type, c.state_key)] = c - - logger.debug( - "_handle_new_event: Before auth check: %s, sigs: %s", - event.event_id, event.signatures, - ) + auth_events[(c.type, c.state_key)] = c try: - self.auth.check(event, auth_events=context.auth_events) - except AuthError: + yield self.do_auth( + origin, event, context, auth_events=auth_events + ) + except AuthError as e: + logger.warn( + "Rejecting %s because %s", + event.event_id, e.msg + ) + # TODO: Store rejection. 
context.rejected = RejectedReason.AUTH_ERROR @@ -731,11 +718,6 @@ class FederationHandler(BaseHandler): ) raise - logger.debug( - "_handle_new_event: Before persist_event: %s, sigs: %s", - event.event_id, event.signatures, - ) - yield self.store.persist_event( event, context=context, @@ -744,25 +726,73 @@ class FederationHandler(BaseHandler): current_state=current_state, ) - logger.debug( - "_handle_new_event: After persist_event: %s, sigs: %s", - event.event_id, event.signatures, + defer.returnValue(context) + + @defer.inlineCallbacks + def on_query_auth(self, origin, event_id, remote_auth_chain, rejects, + missing): + # Just go through and process each event in `remote_auth_chain`. We + # don't want to fall into the trap of `missing` being wrong. + for e in remote_auth_chain: + try: + yield self._handle_new_event(origin, e) + except AuthError: + pass + + # Now get the current auth_chain for the event. + local_auth_chain = yield self.store.get_auth_chain([event_id]) + + # TODO: Check if we would now reject event_id. If so we need to tell + # everyone. + + ret = yield self.construct_auth_difference( + local_auth_chain, remote_auth_chain ) - defer.returnValue(context) + logger.debug("on_query_auth returning: %s", ret) + + defer.returnValue(ret) @defer.inlineCallbacks - def do_auth(self, origin, event, context): - for e_id, _ in event.auth_events: - pass + @log_function + def do_auth(self, origin, event, context, auth_events): + # Check if we have all the auth events. + res = yield self.store.have_events( + [e_id for e_id, _ in event.auth_events] + ) - auth_events = set(e_id for e_id, _ in event.auth_events) - current_state = set(e.event_id for e in context.auth_events.values()) + event_auth_events = set(e_id for e_id, _ in event.auth_events) + seen_events = set(res.keys()) - missing_auth = auth_events - current_state + missing_auth = event_auth_events - seen_events if missing_auth: + logger.debug("Missing auth: %s", missing_auth) + # If we don't have all the auth events, we need to get them. + remote_auth_chain = yield self.replication_layer.get_event_auth( + origin, event.room_id, event.event_id + ) + + for e in remote_auth_chain: + try: + auth_ids = [e_id for e_id, _ in e.auth_events] + auth = { + (e.type, e.state_key): e for e in remote_auth_chain + if e.event_id in auth_ids + } + yield self._handle_new_event( + origin, e, auth_events=auth + ) + auth_events[(e.type, e.state_key)] = e + except AuthError: + pass + + current_state = set(e.event_id for e in auth_events.values()) + different_auth = event_auth_events - current_state + + if different_auth and not event.internal_metadata.is_outlier(): # Do auth conflict res. + logger.debug("Different auth: %s", different_auth) # 1. Get what we think is the auth chain. auth_ids = self.auth.compute_auth_events(event, context) @@ -778,14 +808,24 @@ class FederationHandler(BaseHandler): # 3. Process any remote auth chain events we haven't seen. for e in result.get("missing", []): - # TODO. - pass + try: + auth_ids = [e_id for e_id, _ in e.auth_events] + auth = { + (e.type, e.state_key): e for e in result["auth_chain"] + if e.event_id in auth_ids + } + yield self._handle_new_event( + origin, e, auth_events=auth + ) + auth_events[(e.type, e.state_key)] = e + except AuthError: + pass # 4. Look at rejects and their proofs. # TODO. 
try: - self.auth.check(event, auth_events=context.auth_events) + self.auth.check(event, auth_events=auth_events) except AuthError: raise @@ -802,12 +842,16 @@ class FederationHandler(BaseHandler): dict """ + logger.debug("construct_auth_difference Start!") + # TODO: Make sure we are OK with local_auth or remote_auth having more # auth events in them than strictly necessary. def sort_fun(ev): return ev.depth, ev.event_id + logger.debug("construct_auth_difference after sort_fun!") + # We find the differences by starting at the "bottom" of each list # and iterating up on both lists. The lists are ordered by depth and # then event_id, we iterate up both lists until we find the event ids @@ -823,11 +867,18 @@ class FederationHandler(BaseHandler): local_iter = iter(local_list) remote_iter = iter(remote_list) - current_local = local_iter.next() - current_remote = remote_iter.next() + logger.debug("construct_auth_difference before get_next!") def get_next(it, opt=None): - return it.next() if it.has_next() else opt + try: + return it.next() + except: + return opt + + current_local = get_next(local_iter) + current_remote = get_next(remote_iter) + + logger.debug("construct_auth_difference before while") missing_remotes = [] missing_locals = [] @@ -867,6 +918,8 @@ class FederationHandler(BaseHandler): current_remote = get_next(remote_iter) continue + logger.debug("construct_auth_difference after while") + # missing locals should be sent to the server # We should find why we are missing remotes, as they will have been # rejected. @@ -886,6 +939,7 @@ class FederationHandler(BaseHandler): reason = yield self.store.get_rejection_reason(e.event_id) if reason is None: # FIXME: ERRR?! + logger.warn("Could not find reason for %s", e.event_id) raise RuntimeError("") reason_map[e.event_id] = reason @@ -899,7 +953,10 @@ class FederationHandler(BaseHandler): # TODO: Get proof. 
pass + logger.debug("construct_auth_difference returning") + defer.returnValue({ + "auth_chain": local_auth, "rejects": { e.event_id: { "reason": reason_map[e.event_id], diff --git a/synapse/storage/rejections.py b/synapse/storage/rejections.py index b7249700d7..4e1a9a2783 100644 --- a/synapse/storage/rejections.py +++ b/synapse/storage/rejections.py @@ -28,12 +28,12 @@ class RejectionsStore(SQLBaseStore): values={ "event_id": event_id, "reason": reason, - "last_failure": self._clock.time_msec(), + "last_check": self._clock.time_msec(), } ) def get_rejection_reason(self, event_id): - self._simple_select_one_onecol( + return self._simple_select_one_onecol( table="rejections", retcol="reason", keyvalues={ diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index ed21defd13..44dbce6bea 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -52,6 +52,7 @@ class FederationTestCase(unittest.TestCase): "get_room", "get_destination_retry_timings", "set_destination_retry_timings", + "have_events", ]), resource_for_federation=NonCallableMock(), http_client=NonCallableMock(spec_set=[]), @@ -90,6 +91,7 @@ class FederationTestCase(unittest.TestCase): self.datastore.persist_event.return_value = defer.succeed(None) self.datastore.get_room.return_value = defer.succeed(True) self.auth.check_host_in_room.return_value = defer.succeed(True) + self.datastore.have_events.return_value = defer.succeed({}) def annotate(ev, old_state=None): context = Mock() -- cgit 1.5.1 From 22dd1cde2d83a2448074816108b85d1957315236 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 30 Jan 2015 11:32:35 +0000 Subject: Filter the recent events before applying the limit when doing an incremental sync with a gap --- synapse/api/filtering.py | 2 -- synapse/handlers/sync.py | 53 ++++++++++++++++++++++++++---------- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/storage/stream.py | 21 ++++++++++---- 4 files changed, 54 insertions(+), 24 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index b7e5d3222f..fa4de2614d 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -12,8 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer - from synapse.api.errors import SynapseError from synapse.types import UserID, RoomID diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 5768702192..0df1851b0e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -278,6 +278,40 @@ class SyncHandler(BaseHandler): next_batch=now_token, )) + @defer.inlineCallbacks + def load_filtered_recents(self, room_id, sync_config, since_token, + now_token): + limited = True + recents = [] + filtering_factor = 2 + load_limit = max(sync_config.limit * filtering_factor, 100) + max_repeat = 3 # Only try a few times per room, otherwise + room_key = now_token.room_key + + while limited and len(recents) < sync_config.limit and max_repeat: + events, room_key = yield self.store.get_recent_events_for_room( + room_id, + limit=load_limit + 1, + from_token=since_token.room_key, + end_token=room_key, + ) + loaded_recents = sync_config.filter.filter_room_events(events) + loaded_recents.extend(recents) + recents = loaded_recents + if len(events) <= load_limit: + limited = False + max_repeat -= 1 + + if len(recents) > sync_config.limit: + recents = recents[-sync_config.limit:] + room_key = recents[0].internal_metadata.before + + prev_batch_token = now_token.copy_and_replace( + "room_key", room_key + ) + + defer.returnValue((recents, prev_batch_token, limited)) + @defer.inlineCallbacks def incremental_sync_with_gap_for_room(self, room_id, sync_config, since_token, now_token, @@ -288,28 +322,17 @@ class SyncHandler(BaseHandler): Returns: A Deferred RoomSyncResult """ + # TODO(mjark): Check if they have joined the room between # the previous sync and this one. - # TODO(mjark): Apply the event filter in sync_config taking care to get - # enough events to reach the limit # TODO(mjark): Check for redactions we might have missed. 
- recents, token = yield self.store.get_recent_events_for_room( - room_id, - limit=sync_config.limit + 1, - from_token=since_token.room_key, - end_token=now_token.room_key, + + recents, prev_batch_token, limited = self.load_filtered_recents( + room_id, sync_config, since_token, ) logging.debug("Recents %r", recents) - if len(recents) > sync_config.limit: - limited = True - recents = recents[1:] - else: - limited = False - - prev_batch_token = now_token.copy_and_replace("room_key", token[0]) - # TODO(mjark): This seems racy since this isn't being passed a # token to indicate what point in the stream this is current_state_events = yield self.state_handler.get_current_state( diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index c1277d2675..46ea50d118 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -116,7 +116,7 @@ class SyncRestServlet(RestServlet): user.localpart, filter_id ) except: - filter = Filter({}) + filter = Filter({}) # filter = filter.apply_overrides(http_request) #if filter.matches(event): # # stuff diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 2ea5e1a021..73504c8b52 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -181,15 +181,11 @@ class StreamStore(SQLBaseStore): get_prev_content=True ) - for event, row in zip(ret, rows): - stream = row["stream_ordering"] - topo = event.depth - internal = event.internal_metadata - internal.before = str(_StreamToken(topo, stream - 1)) - internal.after = str(_StreamToken(topo, stream)) + self._set_before_and_after(ret, rows) if rows: key = "s%d" % max([r["stream_ordering"] for r in rows]) + else: # Assume we didn't get anything because there was nothing to # get. @@ -267,6 +263,8 @@ class StreamStore(SQLBaseStore): get_prev_content=True ) + self._set_before_and_after(events, rows) + return events, next_token, return self.runInteraction("paginate_room_events", f) @@ -328,6 +326,8 @@ class StreamStore(SQLBaseStore): get_prev_content=True ) + self._set_before_and_after(events, rows) + return events, token return self.runInteraction( @@ -354,3 +354,12 @@ class StreamStore(SQLBaseStore): key = res[0]["m"] return "s%d" % (key,) + + @staticmethod + def _set_before_and_after(events, rows): + for event, row in zip(events, rows): + stream = row["stream_ordering"] + topo = event.depth + internal = event.internal_metadata + internal.before = str(_StreamToken(topo, stream - 1)) + internal.after = str(_StreamToken(topo, stream)) -- cgit 1.5.1 From 8498d348d818aa2d2cb9bb9bb2775103840f355d Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 30 Jan 2015 11:42:09 +0000 Subject: Fix token formatting --- synapse/handlers/sync.py | 6 +++--- synapse/storage/stream.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index b83fcad655..3c68e2a9ec 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -164,7 +164,7 @@ class SyncHandler(BaseHandler): A Deferred RoomSyncResult. 
""" - recents, prev_batch_token, limited = self.load_filtered_recents( + recents, prev_batch_token, limited = yield self.load_filtered_recents( room_id, sync_config, now_token, ) @@ -288,7 +288,7 @@ class SyncHandler(BaseHandler): room_key = now_token.room_key while limited and len(recents) < sync_config.limit and max_repeat: - events, room_key = yield self.store.get_recent_events_for_room( + events, (room_key,_) = yield self.store.get_recent_events_for_room( room_id, limit=load_limit + 1, from_token=since_token.room_key if since_token else None, @@ -326,7 +326,7 @@ class SyncHandler(BaseHandler): # the previous sync and this one. # TODO(mjark): Check for redactions we might have missed. - recents, prev_batch_token, limited = self.load_filtered_recents( + recents, prev_batch_token, limited = yield self.load_filtered_recents( room_id, sync_config, now_token, since_token, ) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 73504c8b52..3ccb6f8a61 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -316,9 +316,9 @@ class StreamStore(SQLBaseStore): toke = rows[0]["stream_ordering"] - 1 start_token = str(_StreamToken(topo, toke)) - token = (start_token, end_token) + token = (start_token, str(end_token)) else: - token = (end_token, end_token) + token = (str(end_token), str(end_token)) events = self._get_events_txn( txn, -- cgit 1.5.1 From 322a047502c938bfe9a6acab47e370e69fefc522 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 30 Jan 2015 14:46:03 +0000 Subject: Add room member count condition and default rule to make a noise on rooms of only 2 people. --- synapse/push/__init__.py | 50 ++++++++++++++++++++++++++++++++++++++++----- synapse/push/baserules.py | 14 +++++++++++++ synapse/storage/__init__.py | 5 ++++- 3 files changed, 63 insertions(+), 6 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 19478c72a2..cc05278c8c 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -24,6 +24,7 @@ import baserules import logging import fnmatch import json +import re logger = logging.getLogger(__name__) @@ -34,6 +35,8 @@ class Pusher(object): GIVE_UP_AFTER = 24 * 60 * 60 * 1000 DEFAULT_ACTIONS = ['notify'] + INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$") + def __init__(self, _hs, instance_handle, user_name, app_id, app_display_name, device_display_name, pushkey, pushkey_ts, data, last_token, last_success, failing_since): @@ -88,11 +91,21 @@ class Pusher(object): member_events_for_room = yield self.store.get_current_state( room_id=ev['room_id'], event_type='m.room.member', - state_key=self.user_name + state_key=None ) my_display_name = None - if len(member_events_for_room) > 0: - my_display_name = member_events_for_room[0].content['displayname'] + room_member_count = 0 + for mev in member_events_for_room: + if mev.content['membership'] != 'join': + continue + + # This loop does two things: + # 1) Find our current display name + if mev.state_key == self.user_name: + my_display_name = mev.content['displayname'] + + # and 2) Get the number of people in that room + room_member_count += 1 for r in rules: matches = True @@ -102,7 +115,8 @@ class Pusher(object): for c in conditions: matches &= self._event_fulfills_condition( - ev, c, display_name=my_display_name + ev, c, display_name=my_display_name, + room_member_count=room_member_count ) # ignore rules with no actions (we have an explict 'dont_notify' if len(actions) == 0: @@ -116,7 +130,7 @@ class Pusher(object): 
defer.returnValue(Pusher.DEFAULT_ACTIONS) - def _event_fulfills_condition(self, ev, condition, display_name): + def _event_fulfills_condition(self, ev, condition, display_name, room_member_count): if condition['kind'] == 'event_match': if 'pattern' not in condition: logger.warn("event_match condition with no pattern") @@ -138,9 +152,35 @@ class Pusher(object): # the event stream. if 'content' not in ev or 'body' not in ev['content']: return False + if not display_name: + return False return fnmatch.fnmatch( ev['content']['body'].upper(), "*%s*" % (display_name.upper(),) ) + elif condition['kind'] == 'room_member_count': + if 'is' not in condition: + return False + m = Pusher.INEQUALITY_EXPR.match(condition['is']) + if not m: + return False + ineq = m.group(1) + rhs = m.group(2) + if not rhs.isdigit(): + return False + rhs = int(rhs) + + if ineq == '' or ineq == '==': + return room_member_count == rhs + elif ineq == '<': + return room_member_count < rhs + elif ineq == '>': + return room_member_count > rhs + elif ineq == '>=': + return room_member_count >= rhs + elif ineq == '<=': + return room_member_count <= rhs + else: + return False else: return True diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 4caf7beed2..bd162baade 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -32,4 +32,18 @@ def make_base_rules(user_name): } ] }, + { + 'conditions': [ + { + 'kind': 'room_member_count', + 'is': '2' + } + ], + 'actions': [ + 'notify', + { + 'set_sound': 'default' + } + ] + } ] \ No newline at end of file diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 277581b4e2..7b18acf421 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -375,9 +375,12 @@ class DataStore(RoomMemberStore, RoomStore, "redacted": del_sql, } - if event_type: + if event_type and state_key is not None: sql += " AND s.type = ? AND s.state_key = ? " args = (room_id, event_type, state_key) + elif event_type: + sql += " AND s.type = ?" + args = (room_id, event_type) else: args = (room_id, ) -- cgit 1.5.1 From 472cf532b7e64a30c5d7ecdeec9b8f89abf8276c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 30 Jan 2015 14:48:03 +0000 Subject: Put CREATE rejections into seperate .sql --- synapse/storage/schema/im.sql | 7 ------- synapse/storage/schema/rejections.sql | 21 +++++++++++++++++++++ 2 files changed, 21 insertions(+), 7 deletions(-) create mode 100644 synapse/storage/schema/rejections.sql (limited to 'synapse/storage') diff --git a/synapse/storage/schema/im.sql b/synapse/storage/schema/im.sql index bc7c6b6ed5..dd00c1cd2f 100644 --- a/synapse/storage/schema/im.sql +++ b/synapse/storage/schema/im.sql @@ -123,10 +123,3 @@ CREATE TABLE IF NOT EXISTS room_hosts( ); CREATE INDEX IF NOT EXISTS room_hosts_room_id ON room_hosts (room_id); - -CREATE TABLE IF NOT EXISTS rejections( - event_id TEXT NOT NULL, - reason TEXT NOT NULL, - last_check TEXT NOT NULL, - CONSTRAINT ev_id UNIQUE (event_id) ON CONFLICT REPLACE -); diff --git a/synapse/storage/schema/rejections.sql b/synapse/storage/schema/rejections.sql new file mode 100644 index 0000000000..bd2a8b1bb5 --- /dev/null +++ b/synapse/storage/schema/rejections.sql @@ -0,0 +1,21 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS rejections( + event_id TEXT NOT NULL, + reason TEXT NOT NULL, + last_check TEXT NOT NULL, + CONSTRAINT ev_id UNIQUE (event_id) ON CONFLICT REPLACE +); -- cgit 1.5.1 From 2f4cb04f455d24d0086b37bc363137e995d908d5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 30 Jan 2015 14:48:11 +0000 Subject: Be more specific in naming columns in selects. --- synapse/storage/_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 1f5e74a16a..b350fd61f1 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -504,7 +504,7 @@ class SQLBaseStore(object): def _get_event_txn(self, txn, event_id, check_redacted=True, get_prev_content=False, allow_rejected=False): sql = ( - "SELECT internal_metadata, json, r.event_id, reason " + "SELECT e.internal_metadata, e.json, r.event_id, rej.reason " "FROM event_json as e " "LEFT JOIN redactions as r ON e.event_id = r.redacts " "LEFT JOIN rejections as rej on rej.event_id = e.event_id " -- cgit 1.5.1 From e97f756a05519f9d5a8a6ff78182b691dd1355df Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 30 Jan 2015 14:54:06 +0000 Subject: Use 'in' to test if the key exists, remove unused _filters_for_user --- synapse/api/filtering.py | 8 ++------ synapse/storage/filtering.py | 4 ---- 2 files changed, 2 insertions(+), 10 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index fa4de2614d..4d570b74f8 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -114,21 +114,17 @@ class Filtering(object): if not isinstance(event_type, basestring): raise SynapseError(400, "Event type should be a string") - try: + if "format" in definition: event_format = definition["format"] if event_format not in ["federation", "events"]: raise SynapseError(400, "Invalid format: %s" % (event_format,)) - except KeyError: - pass # format is optional - try: + if "select" in definition: event_select_list = definition["select"] for select_key in event_select_list: if select_key not in ["event_id", "origin_server_ts", "thread_id", "content", "content.body"]: raise SynapseError(400, "Bad select: %s" % (select_key,)) - except KeyError: - pass # select is optional if ("bundle_updates" in definition and type(definition["bundle_updates"]) != bool): diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index cb01c2040f..e86eeced45 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -20,10 +20,6 @@ from ._base import SQLBaseStore import json -# TODO(paul) -_filters_for_user = {} - - class FilteringStore(SQLBaseStore): @defer.inlineCallbacks def get_user_filter(self, user_localpart, filter_id): -- cgit 1.5.1 From 4f7fe63b6df891c196698b9d896ca7893c7a8a8e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 30 Jan 2015 14:57:53 +0000 Subject: Remember to add schema file to list --- synapse/storage/__init__.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py 
b/synapse/storage/__init__.py index f233ff2a2a..d03ee80303 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -70,6 +70,7 @@ SCHEMAS = [ "pusher", "media_repository", "filtering", + "rejections", ] -- cgit 1.5.1 From 91015ad008b0d4538022fbddae7da397f7bd7000 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 30 Jan 2015 14:58:54 +0000 Subject: Remove merge conflict --- synapse/storage/__init__.py | 3 --- 1 file changed, 3 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index d03ee80303..f35ece6446 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -32,11 +32,8 @@ from .event_federation import EventFederationStore from .pusher import PusherStore from .push_rule import PushRuleStore from .media_repository import MediaRepositoryStore -<<<<<<< HEAD from .rejections import RejectionsStore -======= ->>>>>>> 471c47441d0c188e845b75c8f446c44899fdcfe7 from .state import StateStore from .signatures import SignatureStore from .filtering import FilteringStore -- cgit 1.5.1 From a006d168c556ca71ad0bbb680c60f1ba170338fb Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 2 Feb 2015 16:05:34 +0000 Subject: Actually merge into develop. --- synapse/storage/__init__.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 9bbd553dfc..1f207495f6 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -67,12 +67,9 @@ SCHEMAS = [ "event_signatures", "pusher", "media_repository", -<<<<<<< HEAD "application_services" -======= "filtering", "rejections", ->>>>>>> develop ] @@ -92,17 +89,13 @@ class DataStore(RoomMemberStore, RoomStore, RegistrationStore, StreamStore, ProfileStore, FeedbackStore, PresenceStore, TransactionStore, DirectoryStore, KeyStore, StateStore, SignatureStore, -<<<<<<< HEAD - EventFederationStore, MediaRepositoryStore, - ApplicationServiceStore -======= + ApplicationServiceStore, EventFederationStore, MediaRepositoryStore, RejectionsStore, FilteringStore, PusherStore, PushRuleStore ->>>>>>> develop ): def __init__(self, hs): -- cgit 1.5.1 From 941f59101b51e9225dbdc38b22110a01de194242 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Feb 2015 16:56:01 +0000 Subject: Don't fail an entire request if one of the returned events fails a signature check. If an event does fail a signature check, look in the local database and request it from the originator. --- synapse/federation/federation_client.py | 107 ++++++++++++++++++++++++-------- synapse/storage/__init__.py | 21 ++++--- 2 files changed, 94 insertions(+), 34 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index e1539bd0e0..b809e935a0 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -224,17 +224,17 @@ class FederationClient(object): for p in result.get("auth_chain", []) ] - for i, pdu in enumerate(pdus): - pdus[i] = yield self._check_sigs_and_hash(pdu) - - # FIXME: We should handle signature failures more gracefully. 
+        signed_pdus = yield self._check_sigs_and_hash_and_fetch(
+            pdus, outlier=True
+        )

-        for i, pdu in enumerate(auth_chain):
-            auth_chain[i] = yield self._check_sigs_and_hash(pdu)
+        signed_auth = yield self._check_sigs_and_hash_and_fetch(
+            auth_chain, outlier=True
+        )

-        # FIXME: We should handle signature failures more gracefully.
+        signed_auth.sort(key=lambda e: e.depth)

-        defer.returnValue((pdus, auth_chain))
+        defer.returnValue((signed_pdus, signed_auth))

     @defer.inlineCallbacks
     @log_function
@@ -248,14 +248,13 @@ class FederationClient(object):
             for p in res["auth_chain"]
         ]

-        for i, pdu in enumerate(auth_chain):
-            auth_chain[i] = yield self._check_sigs_and_hash(pdu)
-
-        # FIXME: We should handle signature failures more gracefully.
+        signed_auth = yield self._check_sigs_and_hash_and_fetch(
+            auth_chain, outlier=True
+        )

-        auth_chain.sort(key=lambda e: e.depth)
+        signed_auth.sort(key=lambda e: e.depth)

-        defer.returnValue(auth_chain)
+        defer.returnValue(signed_auth)

     @defer.inlineCallbacks
     def make_join(self, destination, room_id, user_id):
@@ -291,21 +290,19 @@ class FederationClient(object):
             for p in content.get("auth_chain", [])
         ]

-        for i, pdu in enumerate(state):
-            state[i] = yield self._check_sigs_and_hash(pdu)
-
-        # FIXME: We should handle signature failures more gracefully.
-
-        for i, pdu in enumerate(auth_chain):
-            auth_chain[i] = yield self._check_sigs_and_hash(pdu)
+        signed_state = yield self._check_sigs_and_hash_and_fetch(
+            state, outlier=True
+        )

-        # FIXME: We should handle signature failures more gracefully.
+        signed_auth = yield self._check_sigs_and_hash_and_fetch(
+            auth_chain, outlier=True
+        )

         signed_auth.sort(key=lambda e: e.depth)

         defer.returnValue({
-            "state": state,
-            "auth_chain": auth_chain,
+            "state": signed_state,
+            "auth_chain": signed_auth,
         })

     @defer.inlineCallbacks
@@ -353,12 +350,18 @@ class FederationClient(object):
         )

         auth_chain = [
-            (yield self._check_sigs_and_hash(self.event_from_pdu_json(e)))
+            self.event_from_pdu_json(e)
             for e in content["auth_chain"]
         ]

+        signed_auth = yield self._check_sigs_and_hash_and_fetch(
+            auth_chain, outlier=True
+        )
+
+        signed_auth.sort(key=lambda e: e.depth)
+
         ret = {
-            "auth_chain": auth_chain,
+            "auth_chain": signed_auth,
             "rejects": content.get("rejects", []),
             "missing": content.get("missing", []),
         }
@@ -374,6 +377,58 @@ class FederationClient(object):

         return event

+    @defer.inlineCallbacks
+    def _check_sigs_and_hash_and_fetch(self, pdus, outlier=False):
+        """Takes a list of PDUs and checks the signatures and hashes of each
+        one. If a PDU fails its signature check then we check if we have it in
+        the database and if not then request it from the originating server of
+        that PDU.
+
+        If a PDU fails its content hash check then it is redacted.
+
+        The given list of PDUs is not modified; instead the function returns
+        a new list.
+
+        Args:
+            pdus (list)
+            outlier (bool)
+
+        Returns:
+            Deferred : A list of PDUs that have valid signatures and hashes.
+        """
+        signed_pdus = []
+        for pdu in pdus:
+            try:
+                new_pdu = yield self._check_sigs_and_hash(pdu)
+                signed_pdus.append(new_pdu)
+            except SynapseError:
+                # FIXME: We should handle signature failures more gracefully.

+                # Check local db.
+ new_pdu = yield self.store.get_event( + pdu.event_id, + allow_rejected=True + ) + if new_pdu: + signed_pdus.append(new_pdu) + continue + + # Check pdu.origin + new_pdu = yield self.get_pdu( + destinations=[pdu.origin], + event_id=pdu.event_id, + outlier=outlier, + ) + + if new_pdu: + signed_pdus.append(new_pdu) + continue + + logger.warn("Failed to find copy of %s with valid signature") + + defer.returnValue(signed_pdus) + + @defer.inlineCallbacks def _check_sigs_and_hash(self, pdu): """Throws a SynapseError if the PDU does not have the correct diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 7c54b1b9d3..b4a7a3f068 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -128,16 +128,21 @@ class DataStore(RoomMemberStore, RoomStore, pass @defer.inlineCallbacks - def get_event(self, event_id, allow_none=False): - events = yield self._get_events([event_id]) + def get_event(self, event_id, check_redacted=True, + get_prev_content=False, allow_rejected=False, + allow_none=False): + event = yield self.runInteraction( + "get_event", self._get_event_txn, + event_id, + check_redacted=check_redacted, + get_prev_content=get_prev_content, + allow_rejected=allow_rejected, + ) - if not events: - if allow_none: - defer.returnValue(None) - else: - raise RuntimeError("Could not find event %s" % (event_id,)) + if not event and not allow_none: + raise RuntimeError("Could not find event %s" % (event_id,)) - defer.returnValue(events[0]) + defer.returnValue(event) @log_function def _persist_event_txn(self, txn, event, context, backfilled, -- cgit 1.5.1 From 1a2de0c5feb1183b35045bb7fb9e379a9598d1cb Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 2 Feb 2015 17:39:41 +0000 Subject: Implement txns for AS (un)registration. --- synapse/handlers/appservice.py | 3 +- synapse/storage/__init__.py | 2 +- synapse/storage/appservice.py | 120 +++++++++++++++++++++++++++++++++-------- 3 files changed, 102 insertions(+), 23 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 8bd475cbfd..da994ba8e0 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -43,7 +43,8 @@ class ApplicationServicesHandler(BaseHandler): "Consult the home server admin.", errcode=Codes.FORBIDDEN ) - # TODO store this AS + logger.info("Updating application service info...") + yield self.store.update_app_service(app_service) def unregister(self, token): logger.info("Unregister as_token=%s", token) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 1f207495f6..6ff0093136 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -67,7 +67,7 @@ SCHEMAS = [ "event_signatures", "pusher", "media_repository", - "application_services" + "application_services", "filtering", "rejections", ] diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 5a0e47e0d4..db0c546211 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -15,11 +15,17 @@ import logging from twisted.internet import defer +from synapse.api.errors import StoreError from ._base import SQLBaseStore logger = logging.getLogger(__name__) +namespace_enum = [ + "users", # 0 + "aliases", # 1 + "rooms" # 2 +] # XXX: This feels like it should belong in a "models" module, not storage. 
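# A minimal sketch (assumed values, not taken from this patch): the namespaces
# dict that the class below validates maps each namespace to a list of regex
# strings. For a hypothetical IRC bridge it might look like:
#
_example_namespaces = {
    "users": [r"@irc_.*:example\.org"],    # matrix user IDs the AS claims
    "aliases": [r"#irc_.*:example\.org"],  # room aliases the AS claims
    "rooms": [],                           # no explicit room IDs claimed
}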
class ApplicationService(object):
@@ -30,25 +36,26 @@ class ApplicationService(object):

     def __init__(self, token, url=None, namespaces=None):
         self.token = token
-        if url:
-            self.url = url
-        if namespaces:
-            self._set_namespaces(namespaces)
+        self.url = url
+        self.namespaces = self._get_namespaces(namespaces)

-    def _set_namespaces(self, namespaces):
+    def _get_namespaces(self, namespaces):
         # Sanity check that it is of the form:
         # {
         #   users: ["regex",...],
         #   aliases: ["regex",...],
         #   rooms: ["regex",...],
         # }
+        if not namespaces:
+            return None
+
         for ns in ["users", "rooms", "aliases"]:
             if type(namespaces[ns]) != list:
                 raise ValueError("Bad namespace value for '%s'", ns)
             for regex in namespaces[ns]:
                 if not isinstance(regex, basestring):
                     raise ValueError("Expected string regex for ns '%s'", ns)
-        self.namespaces = namespaces
+        return namespaces

     def is_interested(self, event):
         """Check if this service is interested in this event.
@@ -110,10 +117,38 @@ class ApplicationServiceStore(SQLBaseStore):
         This removes all AS-specific regex and the base URL. The token is the
         only thing preserved for future registration attempts.
         """
-        # TODO: DELETE FROM application_services_regex WHERE id=this service
-        # TODO: SET url=NULL WHERE token=token
-        # TODO: Update cache
-        pass
+        yield self.runInteraction(
+            "unregister_app_service",
+            self._unregister_app_service_txn,
+            token,
+        )
+        # update cache TODO: Should this be in the txn?
+        for service in self.cache.services:
+            if service.token == token:
+                service.url = None
+                service.namespaces = None
+
+    def _unregister_app_service_txn(self, txn, token):
+        # kill the url to prevent pushes
+        txn.execute(
+            "UPDATE application_services SET url=NULL WHERE token=?",
+            (token,)
+        )
+
+        # clean up regex
+        as_id = self._get_as_id_txn(txn, token)
+        if not as_id:
+            logger.warning(
+                "unregister_app_service_txn: Failed to find as_id for token=%s",
+                token
+            )
+            return False
+
+        txn.execute(
+            "DELETE FROM application_services_regex WHERE as_id=?",
+            (as_id,)
+        )
+        return True

     def update_app_service(self, service):
         """Update an application service, clobbering what was previously there.
@@ -124,12 +159,61 @@ class ApplicationServiceStore(SQLBaseStore):
         # NB: There is no "insert" since we provide no public-facing API to
         # allocate new ASes. It relies on the server admin inserting the AS
         # token into the database manually.
+        if not service.token or not service.url:
+            raise StoreError(400, "Token and url must be specified.")
+
+        yield self.runInteraction(
+            "update_app_service",
+            self._update_app_service_txn,
+            service
+        )
+
+        # update cache TODO: Should this be in the txn?
+        for (index, cache_service) in enumerate(self.cache.services):
+            if service.token == cache_service.token:
+                self.cache.services[index] = service
+                logger.info("Updated: %s", service)
+                return
+        # new entry
+        self.cache.services.append(service)
+        logger.info("Updated(new): %s", service)
+
+    def _update_app_service_txn(self, txn, service):
+        as_id = self._get_as_id_txn(txn, service.token)
+        if not as_id:
+            logger.warning(
+                "update_app_service_txn: Failed to find as_id for token=%s",
+                service.token
+            )
+            return False
+
+        txn.execute(
+            "UPDATE application_services SET url=?
WHERE id=?", + (service.url, as_id,) + ) + # cleanup regex + txn.execute( + "DELETE FROM application_services_regex WHERE id=?", + (as_id,) + ) + for (ns_int, ns_str) in enumerate(namespace_enum): + if ns_str in service.namespaces: + for regex in service.namespaces[ns_str]: + txn.execute( + "INSERT INTO application_services_regex(" + "as_id, namespace, regex) values(?,?,?)", + (as_id, ns_int, regex) + ) + return True - # TODO: UPDATE application_services, SET url WHERE token=service.token - # TODO: DELETE FROM application_services_regex WHERE id=this service - # TODO: INSERT INTO application_services_regex - # TODO: Update cache - pass + def _get_as_id_txn(self, txn, token): + cursor = txn.execute( + "SELECT id FROM application_services WHERE token=?", + (token,) + ) + res = cursor.fetchone() + if res: + return res[0] def get_services_for_event(self, event): return self.cache.get_services_for_event(event) @@ -161,12 +245,6 @@ class ApplicationServiceStore(SQLBaseStore): sql = ("SELECT * FROM application_services LEFT JOIN " "application_services_regex ON application_services.id = " "application_services_regex.as_id") - - namespace_enum = [ - "users", # 0 - "aliases", # 1 - "rooms" # 2 - ] # SQL results in the form: # [ # { -- cgit 1.5.1 From 9ff349a3cb1868bb2827047ed0f0d01a9a4c38c7 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 2 Feb 2015 17:42:49 +0000 Subject: Add defers in the right places. --- synapse/storage/appservice.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index db0c546211..dd9b349370 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -111,6 +111,7 @@ class ApplicationServiceStore(SQLBaseStore): self.cache = ApplicationServiceCache() self._populate_cache() + @defer.inlineCallbacks def unregister_app_service(self, token): """Unregisters this service. @@ -150,6 +151,7 @@ class ApplicationServiceStore(SQLBaseStore): ) return True + @defer.inlineCallbacks def update_app_service(self, service): """Update an application service, clobbering what was previously there. -- cgit 1.5.1 From e7ca813dd476c83497d4130ad8efa9424d86e921 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 3 Feb 2015 10:38:14 +0000 Subject: Try to ensure we don't persist an event we have already persisted. In persist_event check if we already have the event, if so then update instead of replacing so that we don't cause a bump of the stream_ordering. 
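The check-then-update pattern described above can be sketched in isolation
like this (a minimal illustration using a sqlite3-style connection; the table
and column names follow the patch below, but the helper itself and the
trimmed column set are assumptions, not Synapse code):

    import sqlite3

    def persist_event_idempotently(conn, event_id, metadata_json, is_outlier):
        row = conn.execute(
            "SELECT event_id FROM event_json WHERE event_id = ?", (event_id,)
        ).fetchone()
        if row:
            # Already persisted: update in place rather than re-inserting,
            # so existing rows (and their stream_ordering) are untouched.
            if not is_outlier:
                conn.execute(
                    "UPDATE event_json SET internal_metadata = ?"
                    " WHERE event_id = ?",
                    (metadata_json, event_id),
                )
                conn.execute(
                    "UPDATE events SET outlier = 0 WHERE event_id = ?",
                    (event_id,),
                )
            return
        # First time we see this event: insert as normal (columns trimmed
        # for illustration).
        conn.execute(
            "INSERT INTO event_json (event_id, internal_metadata)"
            " VALUES (?, ?)",
            (event_id, metadata_json),
        )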
--- synapse/handlers/federation.py | 42 ++++++++++++++++++++++++++------------- synapse/storage/__init__.py | 40 +++++++++++++++++++++++++++++++++---- tests/handlers/test_federation.py | 5 ++++- 3 files changed, 68 insertions(+), 19 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 8bf5a4cc11..c384789c2f 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -112,6 +112,14 @@ class FederationHandler(BaseHandler): logger.debug("Event: %s", event) + event_ids = set() + if state: + event_ids += {e.event_id for e in state} + if auth_chain: + event_ids += {e.event_id for e in auth_chain} + + seen_ids = (yield self.store.have_events(event_ids)).keys() + # FIXME (erikj): Awful hack to make the case where we are not currently # in the room work current_state = None @@ -124,20 +132,26 @@ class FederationHandler(BaseHandler): current_state = state if state and auth_chain is not None: - for e in state: - e.internal_metadata.outlier = True - try: - auth_ids = [e_id for e_id, _ in e.auth_events] - auth = { - (e.type, e.state_key): e for e in auth_chain - if e.event_id in auth_ids - } - yield self._handle_new_event(origin, e, auth_events=auth) - except: - logger.exception( - "Failed to handle state event %s", - e.event_id, - ) + for list_of_pdus in [auth_chain, state]: + for e in list_of_pdus: + if e.event_id in seen_ids: + continue + + e.internal_metadata.outlier = True + try: + auth_ids = [e_id for e_id, _ in e.auth_events] + auth = { + (e.type, e.state_key): e for e in auth_chain + if e.event_id in auth_ids + } + yield self._handle_new_event( + origin, e, auth_events=auth + ) + except: + logger.exception( + "Failed to handle state event %s", + e.event_id, + ) try: yield self._handle_new_event( diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index b4a7a3f068..93aefe0c48 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -161,6 +161,39 @@ class DataStore(RoomMemberStore, RoomStore, outlier = event.internal_metadata.is_outlier() + have_persisted = self._simple_select_one_onecol_txn( + txn, + table="event_json", + keyvalues={"event_id": event.event_id}, + retcol="event_id", + allow_none=True, + ) + + metadata_json = encode_canonical_json( + event.internal_metadata.get_dict() + ) + + if have_persisted: + if not outlier: + sql = ( + "UPDATE event_json SET internal_metadata = ?" + " WHERE event_id = ?" + ) + txn.execute( + sql, + (metadata_json.decode("UTF-8"), event.event_id,) + ) + + sql = ( + "UPDATE events SET outlier = 0" + " WHERE event_id = ?" + ) + txn.execute( + sql, + (event.event_id,) + ) + return + event_dict = { k: v for k, v in event.get_dict().items() @@ -170,10 +203,6 @@ class DataStore(RoomMemberStore, RoomStore, ] } - metadata_json = encode_canonical_json( - event.internal_metadata.get_dict() - ) - self._simple_insert_txn( txn, table="event_json", @@ -482,6 +511,9 @@ class DataStore(RoomMemberStore, RoomStore, the rejected reason string if we rejected the event, else maps to None. 
""" + if not event_ids: + return defer.succeed({}) + def f(txn): sql = ( "SELECT e.event_id, reason FROM events as e " diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 44dbce6bea..4270481139 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -91,7 +91,10 @@ class FederationTestCase(unittest.TestCase): self.datastore.persist_event.return_value = defer.succeed(None) self.datastore.get_room.return_value = defer.succeed(True) self.auth.check_host_in_room.return_value = defer.succeed(True) - self.datastore.have_events.return_value = defer.succeed({}) + + def have_events(event_ids): + return defer.succeed({}) + self.datastore.have_events.side_effect = have_events def annotate(ev, old_state=None): context = Mock() -- cgit 1.5.1 From 197f3ea4bad066da251c7925336baab8bee296c9 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Tue, 3 Feb 2015 11:26:33 +0000 Subject: Implement regex checks for app services. Expose handler.get_services_for_event which manages the checks for all services. --- synapse/handlers/appservice.py | 25 +++++++++++++-- synapse/storage/appservice.py | 71 ++++++++++++++++++++++++++---------------- 2 files changed, 67 insertions(+), 29 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index da994ba8e0..9b8dd1bb49 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -34,9 +34,11 @@ class ApplicationServicesHandler(BaseHandler): logger.info("Register -> %s", app_service) # check the token is recognised try: - stored_service = yield self.store.get_app_service(app_service.token) + stored_service = yield self.store.get_app_service_by_token( + app_service.token + ) if not stored_service: - raise StoreError(404, "Application Service Not found") + raise StoreError(404, "Application service not found") except StoreError: raise SynapseError( 403, "Unrecognised application services token. " @@ -50,6 +52,25 @@ class ApplicationServicesHandler(BaseHandler): logger.info("Unregister as_token=%s", token) yield self.store.unregister_app_service(token) + def get_services_for_event(self, event): + """Retrieve a list of application services interested in this event. + + Args: + event(Event): The event to check. + Returns: + list: A list of services interested in this + event based on the service regex. + """ + # We need to know the aliases associated with this event.room_id, if any + alias_list = [] # TODO + + interested_list = [ + s for s in self.store.get_app_services() if ( + s.is_interested(event, alias_list) + ) + ] + return interested_list + def notify_interested_services(self, event): """Notifies (pushes) all application services interested in this event. diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index dd9b349370..c4e50be4c6 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -13,8 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +import re from twisted.internet import defer +from synapse.api.constants import EventTypes from synapse.api.errors import StoreError from ._base import SQLBaseStore @@ -27,6 +29,7 @@ namespace_enum = [ "rooms" # 2 ] + # XXX: This feels like it should belong in a "models" module, not storage. class ApplicationService(object): """Defines an application service. 
@@ -37,9 +40,9 @@ class ApplicationService(object): def __init__(self, token, url=None, namespaces=None): self.token = token self.url = url - self.namespaces = self._get_namespaces(namespaces) + self.namespaces = self._check_namespaces(namespaces) - def _get_namespaces(self, namespaces): + def _check_namespaces(self, namespaces): # Sanity check that it is of the form: # { # users: ["regex",...], @@ -57,22 +60,50 @@ class ApplicationService(object): raise ValueError("Expected string regex for ns '%s'", ns) return namespaces - def is_interested(self, event): + def _matches_regex(self, test_string, namespace_key): + for regex in self.namespaces[namespace_key]: + if re.match(regex, test_string): + return True + return False + + def _matches_user(self, event): + if (hasattr(event, "user_id") and + self._matches_regex(event.user_id, "users")): + return True + # also check m.room.member state key + if (hasattr(event, "type") and event.type == EventTypes.Member + and hasattr(event, "state_key") + and self._matches_regex(event.state_key, "users")): + return True + return False + + def _matches_room_id(self, event): + if hasattr(event, "room_id"): + return self._matches_regex(event.room_id, "rooms") + return False + + def _matches_aliases(self, event, alias_list): + for alias in alias_list: + if self._matches_regex(alias, "aliases"): + return True + return False + + def is_interested(self, event, aliases_for_event=None): """Check if this service is interested in this event. Args: event(Event): The event to check. + aliases_for_event(list): A list of all the known room aliases for + this event. Returns: bool: True if this service would like to know about this event. """ - # NB: This does not check room alias regex matches because that requires - # more context that an Event can provide. Room alias matches are checked - # in the ApplicationServiceHandler. - - # TODO check if event.room_id regex matches - # TODO check if event.user_id regex matches (or m.room.member state_key) + if aliases_for_event is None: + aliases_for_event = [] - return True + return (self._matches_user(event) + or self._matches_aliases(event, aliases_for_event) + or self._matches_room_id(event)) def __str__(self): return "ApplicationService: %s" % (self.__dict__,) @@ -89,20 +120,6 @@ class ApplicationServiceCache(object): def __init__(self): self.services = [] - def get_services_for_event(self, event): - """Retrieve a list of application services interested in this event. - - Args: - event(Event): The event to check. - Returns: - list: A list of services interested in this - event based on the service regex. - """ - interested_list = [ - s for s in self.services if s.is_event_claimed(event) - ] - return interested_list - class ApplicationServiceStore(SQLBaseStore): @@ -217,10 +234,10 @@ class ApplicationServiceStore(SQLBaseStore): if res: return res[0] - def get_services_for_event(self, event): - return self.cache.get_services_for_event(event) + def get_app_services(self): + return self.cache.services - def get_app_service(self, token, from_cache=True): + def get_app_service_by_token(self, token, from_cache=True): """Get the application service with the given token. 
Args: -- cgit 1.5.1 From 3bd2841fdbbbc2e290d13cbd1aa9becc315d2f1c Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Tue, 3 Feb 2015 11:37:52 +0000 Subject: Everyone loves SQL typos --- synapse/storage/appservice.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index c4e50be4c6..07ed0adcf8 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -212,7 +212,7 @@ class ApplicationServiceStore(SQLBaseStore): ) # cleanup regex txn.execute( - "DELETE FROM application_services_regex WHERE id=?", + "DELETE FROM application_services_regex WHERE as_id=?", (as_id,) ) for (ns_int, ns_str) in enumerate(namespace_enum): -- cgit 1.5.1 From a060b47b13037da56ed8db2978a297133c23fc7f Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Tue, 3 Feb 2015 13:17:28 +0000 Subject: Add namespace constants. Add restrict_to option to limit namespace checks. --- synapse/handlers/appservice.py | 25 +++++++++++++++---------- synapse/storage/appservice.py | 41 ++++++++++++++++++++++++----------------- 2 files changed, 39 insertions(+), 27 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 9b8dd1bb49..bf68b33398 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -52,11 +52,12 @@ class ApplicationServicesHandler(BaseHandler): logger.info("Unregister as_token=%s", token) yield self.store.unregister_app_service(token) - def get_services_for_event(self, event): + def get_services_for_event(self, event, restrict_to=""): """Retrieve a list of application services interested in this event. Args: event(Event): The event to check. + restrict_to(str): The namespace to restrict regex tests to. Returns: list: A list of services interested in this event based on the service regex. @@ -66,7 +67,7 @@ class ApplicationServicesHandler(BaseHandler): interested_list = [ s for s in self.store.get_app_services() if ( - s.is_interested(event, alias_list) + s.is_interested(event, restrict_to, alias_list) ) ] return interested_list @@ -80,11 +81,15 @@ class ApplicationServicesHandler(BaseHandler): Args: event(Event): The event to push out to interested services. """ - # TODO: Gather interested services - # get_services_for_event(event) <-- room IDs and user IDs - # Get a list of room aliases. Check regex. - # TODO: If unknown user: poke User Query API. - # TODO: If unknown room alias: poke Room Alias Query API. - - # TODO: Fork off pushes to these services - XXX First cut, best effort - pass + # Gather interested services + services = self.get_services_for_event(event) + if len(services) == 0: + return # no services need notifying + + # Do we know this user exists? If not, poke the user query API for + # all services which match that user regex. + + # Do we know this room alias exists? If not, poke the room alias query + # API for all services which match that room alias regex. + + # Fork off pushes to these services - XXX First cut, best effort diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 07ed0adcf8..277741fced 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -23,12 +23,6 @@ from ._base import SQLBaseStore logger = logging.getLogger(__name__) -namespace_enum = [ - "users", # 0 - "aliases", # 1 - "rooms" # 2 -] - # XXX: This feels like it should belong in a "models" module, not storage. 
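# A note for readers (minimal sketch, assumed helper, not part of the patch):
# the namespace matching below uses re.match, which anchors at the start of
# the string only, so a regex such as "@irc_.*" matches any user ID with that
# prefix. In isolation:
#
#     import re
#
#     def matches_namespace(test_string, regex_list):
#         # True if any regex in the list matches from the string's start.
#         return any(re.match(regex, test_string) for regex in regex_list)
#
#     # matches_namespace("@irc_alice:example.org", [r"@irc_.*"]) -> True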
class ApplicationService(object): @@ -36,6 +30,10 @@ class ApplicationService(object): Provides methods to check if this service is "interested" in events. """ + NS_USERS = "users" + NS_ALIASES = "aliases" + NS_ROOMS = "rooms" + NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS] def __init__(self, token, url=None, namespaces=None): self.token = token @@ -52,7 +50,7 @@ class ApplicationService(object): if not namespaces: return None - for ns in ["users", "rooms", "aliases"]: + for ns in ApplicationService.NS_LIST: if type(namespaces[ns]) != list: raise ValueError("Bad namespace value for '%s'", ns) for regex in namespaces[ns]: @@ -68,31 +66,36 @@ class ApplicationService(object): def _matches_user(self, event): if (hasattr(event, "user_id") and - self._matches_regex(event.user_id, "users")): + self._matches_regex( + event.user_id, ApplicationService.NS_USERS)): return True # also check m.room.member state key if (hasattr(event, "type") and event.type == EventTypes.Member and hasattr(event, "state_key") - and self._matches_regex(event.state_key, "users")): + and self._matches_regex( + event.state_key, ApplicationService.NS_USERS)): return True return False def _matches_room_id(self, event): if hasattr(event, "room_id"): - return self._matches_regex(event.room_id, "rooms") + return self._matches_regex( + event.room_id, ApplicationService.NS_ROOMS + ) return False def _matches_aliases(self, event, alias_list): for alias in alias_list: - if self._matches_regex(alias, "aliases"): + if self._matches_regex(alias, ApplicationService.NS_ALIASES): return True return False - def is_interested(self, event, aliases_for_event=None): + def is_interested(self, event, restrict_to=None, aliases_for_event=None): """Check if this service is interested in this event. Args: event(Event): The event to check. + restrict_to(str): The namespace to restrict regex tests to. aliases_for_event(list): A list of all the known room aliases for this event. Returns: @@ -100,6 +103,9 @@ class ApplicationService(object): """ if aliases_for_event is None: aliases_for_event = [] + if restrict_to not in ApplicationService.NS_LIST: + # this is a programming error, so raise a general exception + raise Exception("Unexpected restrict_to value: %s". restrict_to) return (self._matches_user(event) or self._matches_aliases(event, aliases_for_event) @@ -215,7 +221,7 @@ class ApplicationServiceStore(SQLBaseStore): "DELETE FROM application_services_regex WHERE as_id=?", (as_id,) ) - for (ns_int, ns_str) in enumerate(namespace_enum): + for (ns_int, ns_str) in enumerate(ApplicationService.NS_LIST): if ns_str in service.namespaces: for regex in service.namespaces[ns_str]: txn.execute( @@ -285,9 +291,9 @@ class ApplicationServiceStore(SQLBaseStore): "url": res["url"], "token": as_token, "namespaces": { - "users": [], - "aliases": [], - "rooms": [] + ApplicationService.NS_USERS: [], + ApplicationService.NS_ALIASES: [], + ApplicationService.NS_ROOMS: [] } } # add the namespace regex if one exists @@ -295,7 +301,8 @@ class ApplicationServiceStore(SQLBaseStore): if ns_int is None: continue try: - services[as_token]["namespaces"][namespace_enum[ns_int]].append( + services[as_token]["namespaces"][ + ApplicationService.NS_LIST[ns_int]].append( res["regex"] ) except IndexError: -- cgit 1.5.1 From f2c039bfb958ed349bce42098e296995786374cc Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Tue, 3 Feb 2015 13:29:27 +0000 Subject: Implement restricted namespace checks. Begin fleshing out the main hook for notifying application services. 
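The restrict_to dispatch this commit adds narrows an interest check to one
namespace. Its control flow amounts to the following sketch (the standalone
function and the checks dict are assumptions for illustration; the method
names come from the patch, whose own raise line uses "." where "%"-formatting
is intended):

    def is_interested(service, event, restrict_to=None, aliases=()):
        checks = {
            "users": lambda: service._matches_user(event),
            "aliases": lambda: service._matches_aliases(event, list(aliases)),
            "rooms": lambda: service._matches_room_id(event),
        }
        if restrict_to is None:
            # No restriction: interested if any namespace matches.
            return any(check() for check in checks.values())
        if restrict_to not in checks:
            # A programming error, so fail early.
            raise ValueError("Unexpected restrict_to value: %s" % restrict_to)
        return checks[restrict_to]()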
--- synapse/handlers/appservice.py | 19 +++++++++++++++++++ synapse/storage/appservice.py | 21 ++++++++++++++++----- 2 files changed, 35 insertions(+), 5 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index bf68b33398..dac63e2245 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -17,6 +17,7 @@ from twisted.internet import defer from ._base import BaseHandler from synapse.api.errors import Codes, StoreError, SynapseError +from synapse.storage.appservice import ApplicationService import logging @@ -88,8 +89,26 @@ class ApplicationServicesHandler(BaseHandler): # Do we know this user exists? If not, poke the user query API for # all services which match that user regex. + unknown_user = False # TODO check + if unknown_user: + user_query_services = self.get_services_for_event( + event=event, + restrict_to=ApplicationService.NS_USERS + ) + for user_service in user_query_services: + pass # TODO poke User Query API # Do we know this room alias exists? If not, poke the room alias query # API for all services which match that room alias regex. + unknown_room_alias = False # TODO check + if unknown_room_alias: + alias_query_services = self.get_services_for_event( + event=event, + restrict_to=ApplicationService.NS_ALIASES + ) + for alias_service in alias_query_services: + pass # TODO poke Room Alias Query API # Fork off pushes to these services - XXX First cut, best effort + for service in services: + pass # TODO push event to service diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 277741fced..cdf26ee434 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -33,6 +33,9 @@ class ApplicationService(object): NS_USERS = "users" NS_ALIASES = "aliases" NS_ROOMS = "rooms" + # The ordering here is important as it is used to map database values (which + # are stored as ints representing the position in this list) to namespace + # values. NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS] def __init__(self, token, url=None, namespaces=None): @@ -103,13 +106,21 @@ class ApplicationService(object): """ if aliases_for_event is None: aliases_for_event = [] - if restrict_to not in ApplicationService.NS_LIST: - # this is a programming error, so raise a general exception + if restrict_to and restrict_to not in ApplicationService.NS_LIST: + # this is a programming error, so fail early and raise a general + # exception raise Exception("Unexpected restrict_to value: %s". restrict_to) - return (self._matches_user(event) - or self._matches_aliases(event, aliases_for_event) - or self._matches_room_id(event)) + if not restrict_to: + return (self._matches_user(event) + or self._matches_aliases(event, aliases_for_event) + or self._matches_room_id(event)) + elif restrict_to == ApplicationService.NS_ALIASES: + return self._matches_aliases(event, aliases_for_event) + elif restrict_to == ApplicationService.NS_ROOMS: + return self._matches_room_id(event) + elif restrict_to == ApplicationService.NS_USERS: + return self._matches_user(event) def __str__(self): return "ApplicationService: %s" % (self.__dict__,) -- cgit 1.5.1 From 94a5db9f4d400a345c5d8b9f7bacb0c9ccf99959 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Tue, 3 Feb 2015 14:44:16 +0000 Subject: Add appservice package and move ApplicationService into it. 
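With the class moved into its own package, callers import it directly. Usage
looks like this sketch (the token, URL, and regex values are placeholders):

    from synapse.appservice import ApplicationService

    service = ApplicationService(
        token="some_as_token",
        url="https://bridge.example.org",
        namespaces={
            "users": [r"@irc_.*:example\.org"],
            "aliases": [r"#irc_.*:example\.org"],
            "rooms": [],
        },
    )
    # interested = service.is_interested(event, aliases_for_event=[...])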
--- synapse/appservice/__init__.py | 119 +++++++++++++++++++++++++++++++++++++++++ synapse/appservice/api.py | 15 ++++++ synapse/handlers/appservice.py | 4 +- synapse/storage/appservice.py | 105 +----------------------------------- 4 files changed, 138 insertions(+), 105 deletions(-) create mode 100644 synapse/appservice/__init__.py create mode 100644 synapse/appservice/api.py (limited to 'synapse/storage') diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py new file mode 100644 index 0000000000..f801fb5324 --- /dev/null +++ b/synapse/appservice/__init__.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from synapse.api.constants import EventTypes + +import re + + +class ApplicationService(object): + """Defines an application service. This definition is mostly what is + provided to the /register AS API. + + Provides methods to check if this service is "interested" in events. + """ + NS_USERS = "users" + NS_ALIASES = "aliases" + NS_ROOMS = "rooms" + # The ordering here is important as it is used to map database values (which + # are stored as ints representing the position in this list) to namespace + # values. + NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS] + + def __init__(self, token, url=None, namespaces=None): + self.token = token + self.url = url + self.namespaces = self._check_namespaces(namespaces) + + def _check_namespaces(self, namespaces): + # Sanity check that it is of the form: + # { + # users: ["regex",...], + # aliases: ["regex",...], + # rooms: ["regex",...], + # } + if not namespaces: + return None + + for ns in ApplicationService.NS_LIST: + if type(namespaces[ns]) != list: + raise ValueError("Bad namespace value for '%s'", ns) + for regex in namespaces[ns]: + if not isinstance(regex, basestring): + raise ValueError("Expected string regex for ns '%s'", ns) + return namespaces + + def _matches_regex(self, test_string, namespace_key): + for regex in self.namespaces[namespace_key]: + if re.match(regex, test_string): + return True + return False + + def _matches_user(self, event): + if (hasattr(event, "user_id") and + self._matches_regex( + event.user_id, ApplicationService.NS_USERS)): + return True + # also check m.room.member state key + if (hasattr(event, "type") and event.type == EventTypes.Member + and hasattr(event, "state_key") + and self._matches_regex( + event.state_key, ApplicationService.NS_USERS)): + return True + return False + + def _matches_room_id(self, event): + if hasattr(event, "room_id"): + return self._matches_regex( + event.room_id, ApplicationService.NS_ROOMS + ) + return False + + def _matches_aliases(self, event, alias_list): + for alias in alias_list: + if self._matches_regex(alias, ApplicationService.NS_ALIASES): + return True + return False + + def is_interested(self, event, restrict_to=None, aliases_for_event=None): + """Check if this service is interested in this event. + + Args: + event(Event): The event to check. 
+ restrict_to(str): The namespace to restrict regex tests to. + aliases_for_event(list): A list of all the known room aliases for + this event. + Returns: + bool: True if this service would like to know about this event. + """ + if aliases_for_event is None: + aliases_for_event = [] + if restrict_to and restrict_to not in ApplicationService.NS_LIST: + # this is a programming error, so fail early and raise a general + # exception + raise Exception("Unexpected restrict_to value: %s". restrict_to) + + if not restrict_to: + return (self._matches_user(event) + or self._matches_aliases(event, aliases_for_event) + or self._matches_room_id(event)) + elif restrict_to == ApplicationService.NS_ALIASES: + return self._matches_aliases(event, aliases_for_event) + elif restrict_to == ApplicationService.NS_ROOMS: + return self._matches_room_id(event) + elif restrict_to == ApplicationService.NS_USERS: + return self._matches_user(event) + + def __str__(self): + return "ApplicationService: %s" % (self.__dict__,) diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py new file mode 100644 index 0000000000..803f97ea4f --- /dev/null +++ b/synapse/appservice/api.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index dac63e2245..f05b57bcb9 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -17,7 +17,7 @@ from twisted.internet import defer from ._base import BaseHandler from synapse.api.errors import Codes, StoreError, SynapseError -from synapse.storage.appservice import ApplicationService +from synapse.appservice import ApplicationService import logging @@ -96,6 +96,7 @@ class ApplicationServicesHandler(BaseHandler): restrict_to=ApplicationService.NS_USERS ) for user_service in user_query_services: + # this needs to block XXX: Need to feed response back to caller pass # TODO poke User Query API # Do we know this room alias exists? If not, poke the room alias query @@ -107,6 +108,7 @@ class ApplicationServicesHandler(BaseHandler): restrict_to=ApplicationService.NS_ALIASES ) for alias_service in alias_query_services: + # this needs to block XXX: Need to feed response back to caller pass # TODO poke Room Alias Query API # Fork off pushes to these services - XXX First cut, best effort diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index cdf26ee434..48bc7e0fe6 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -13,119 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import re from twisted.internet import defer -from synapse.api.constants import EventTypes from synapse.api.errors import StoreError +from synapse.appservice import ApplicationService from ._base import SQLBaseStore logger = logging.getLogger(__name__) -# XXX: This feels like it should belong in a "models" module, not storage. 
-class ApplicationService(object): - """Defines an application service. - - Provides methods to check if this service is "interested" in events. - """ - NS_USERS = "users" - NS_ALIASES = "aliases" - NS_ROOMS = "rooms" - # The ordering here is important as it is used to map database values (which - # are stored as ints representing the position in this list) to namespace - # values. - NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS] - - def __init__(self, token, url=None, namespaces=None): - self.token = token - self.url = url - self.namespaces = self._check_namespaces(namespaces) - - def _check_namespaces(self, namespaces): - # Sanity check that it is of the form: - # { - # users: ["regex",...], - # aliases: ["regex",...], - # rooms: ["regex",...], - # } - if not namespaces: - return None - - for ns in ApplicationService.NS_LIST: - if type(namespaces[ns]) != list: - raise ValueError("Bad namespace value for '%s'", ns) - for regex in namespaces[ns]: - if not isinstance(regex, basestring): - raise ValueError("Expected string regex for ns '%s'", ns) - return namespaces - - def _matches_regex(self, test_string, namespace_key): - for regex in self.namespaces[namespace_key]: - if re.match(regex, test_string): - return True - return False - - def _matches_user(self, event): - if (hasattr(event, "user_id") and - self._matches_regex( - event.user_id, ApplicationService.NS_USERS)): - return True - # also check m.room.member state key - if (hasattr(event, "type") and event.type == EventTypes.Member - and hasattr(event, "state_key") - and self._matches_regex( - event.state_key, ApplicationService.NS_USERS)): - return True - return False - - def _matches_room_id(self, event): - if hasattr(event, "room_id"): - return self._matches_regex( - event.room_id, ApplicationService.NS_ROOMS - ) - return False - - def _matches_aliases(self, event, alias_list): - for alias in alias_list: - if self._matches_regex(alias, ApplicationService.NS_ALIASES): - return True - return False - - def is_interested(self, event, restrict_to=None, aliases_for_event=None): - """Check if this service is interested in this event. - - Args: - event(Event): The event to check. - restrict_to(str): The namespace to restrict regex tests to. - aliases_for_event(list): A list of all the known room aliases for - this event. - Returns: - bool: True if this service would like to know about this event. - """ - if aliases_for_event is None: - aliases_for_event = [] - if restrict_to and restrict_to not in ApplicationService.NS_LIST: - # this is a programming error, so fail early and raise a general - # exception - raise Exception("Unexpected restrict_to value: %s". restrict_to) - - if not restrict_to: - return (self._matches_user(event) - or self._matches_aliases(event, aliases_for_event) - or self._matches_room_id(event)) - elif restrict_to == ApplicationService.NS_ALIASES: - return self._matches_aliases(event, aliases_for_event) - elif restrict_to == ApplicationService.NS_ROOMS: - return self._matches_room_id(event) - elif restrict_to == ApplicationService.NS_USERS: - return self._matches_user(event) - - def __str__(self): - return "ApplicationService: %s" % (self.__dict__,) - - class ApplicationServiceCache(object): """Caches ApplicationServices and provides utility functions on top. 
-- cgit 1.5.1 From dc7bb70f22edf8ef0631c961f2c77a82de7c76d5 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 3 Feb 2015 16:51:07 +0000 Subject: s/instance_handle/profile_tag/ --- synapse/push/__init__.py | 8 ++++---- synapse/push/httppusher.py | 4 ++-- synapse/push/pusherpool.py | 12 ++++++------ synapse/rest/client/v1/push_rule.py | 28 ++++++++++++++-------------- synapse/rest/client/v1/pusher.py | 4 ++-- synapse/storage/pusher.py | 14 +++++++------- synapse/storage/schema/delta/v12.sql | 2 +- synapse/storage/schema/pusher.sql | 2 +- 8 files changed, 37 insertions(+), 37 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 00f3513c23..8c6f0a6571 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -37,14 +37,14 @@ class Pusher(object): INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$") - def __init__(self, _hs, instance_handle, user_name, app_id, + def __init__(self, _hs, profile_tag, user_name, app_id, app_display_name, device_display_name, pushkey, pushkey_ts, data, last_token, last_success, failing_since): self.hs = _hs self.evStreamHandler = self.hs.get_handlers().event_stream_handler self.store = self.hs.get_datastore() self.clock = self.hs.get_clock() - self.instance_handle = instance_handle + self.profile_tag = profile_tag self.user_name = user_name self.app_id = app_id self.app_display_name = app_display_name @@ -147,9 +147,9 @@ class Pusher(object): return False return fnmatch.fnmatch(val.upper(), pat.upper()) elif condition['kind'] == 'device': - if 'instance_handle' not in condition: + if 'profile_tag' not in condition: return True - return condition['instance_handle'] == self.instance_handle + return condition['profile_tag'] == self.profile_tag elif condition['kind'] == 'contains_display_name': # This is special because display names can be different # between rooms and so you can't really hard code it in a rule. 
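The 'device' condition rewritten above now keys off profile_tag; evaluated on
its own it reduces to this sketch, with an assumed condition dict:

    def device_condition_matches(condition, pusher_profile_tag):
        # A device condition without a profile_tag matches every pusher.
        if "profile_tag" not in condition:
            return True
        return condition["profile_tag"] == pusher_profile_tag

    # e.g. device_condition_matches(
    #     {"kind": "device", "profile_tag": "work_phone"}, "work_phone"
    # ) -> True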
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 7c6953c989..5788db4eba 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -24,12 +24,12 @@ logger = logging.getLogger(__name__) class HttpPusher(Pusher): - def __init__(self, _hs, instance_handle, user_name, app_id, + def __init__(self, _hs, profile_tag, user_name, app_id, app_display_name, device_display_name, pushkey, pushkey_ts, data, last_token, last_success, failing_since): super(HttpPusher, self).__init__( _hs, - instance_handle, + profile_tag, user_name, app_id, app_display_name, diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 4892c21e7b..5a525befd7 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -55,7 +55,7 @@ class PusherPool: self._start_pushers(pushers) @defer.inlineCallbacks - def add_pusher(self, user_name, instance_handle, kind, app_id, + def add_pusher(self, user_name, profile_tag, kind, app_id, app_display_name, device_display_name, pushkey, lang, data): # we try to create the pusher just to validate the config: it # will then get pulled out of the database, @@ -64,7 +64,7 @@ class PusherPool: self._create_pusher({ "user_name": user_name, "kind": kind, - "instance_handle": instance_handle, + "profile_tag": profile_tag, "app_id": app_id, "app_display_name": app_display_name, "device_display_name": device_display_name, @@ -77,18 +77,18 @@ class PusherPool: "failing_since": None }) yield self._add_pusher_to_store( - user_name, instance_handle, kind, app_id, + user_name, profile_tag, kind, app_id, app_display_name, device_display_name, pushkey, lang, data ) @defer.inlineCallbacks - def _add_pusher_to_store(self, user_name, instance_handle, kind, app_id, + def _add_pusher_to_store(self, user_name, profile_tag, kind, app_id, app_display_name, device_display_name, pushkey, lang, data): yield self.store.add_pusher( user_name=user_name, - instance_handle=instance_handle, + profile_tag=profile_tag, kind=kind, app_id=app_id, app_display_name=app_display_name, @@ -104,7 +104,7 @@ class PusherPool: if pusherdict['kind'] == 'http': return HttpPusher( self.hs, - instance_handle=pusherdict['instance_handle'], + profile_tag=pusherdict['profile_tag'], user_name=pusherdict['user_name'], app_id=pusherdict['app_id'], app_display_name=pusherdict['app_display_name'], diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index faa7919fbb..348adb9c0d 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -112,7 +112,7 @@ class PushRuleRestServlet(ClientV1RestServlet): if device: conditions.append({ 'kind': 'device', - 'instance_handle': device + 'profile_tag': device }) if 'actions' not in req_obj: @@ -195,7 +195,7 @@ class PushRuleRestServlet(ClientV1RestServlet): for r in rules: conditions = json.loads(r['conditions']) - ih = _instance_handle_from_conditions(conditions) + ih = _profile_tag_from_conditions(conditions) if ih == spec['device'] and r['priority_class'] == priority_class: yield self.hs.get_datastore().delete_push_rule( user.to_string(), spec['rule_id'] @@ -239,19 +239,19 @@ class PushRuleRestServlet(ClientV1RestServlet): if r['priority_class'] > PushRuleRestServlet.PRIORITY_CLASS_MAP['override']: # per-device rule - instance_handle = _instance_handle_from_conditions(r["conditions"]) + profile_tag = _profile_tag_from_conditions(r["conditions"]) r = _strip_device_condition(r) - if not instance_handle: + if not profile_tag: continue - if instance_handle not in 
rules['device']: - rules['device'][instance_handle] = {} - rules['device'][instance_handle] = ( + if profile_tag not in rules['device']: + rules['device'][profile_tag] = {} + rules['device'][profile_tag] = ( _add_empty_priority_class_arrays( - rules['device'][instance_handle] + rules['device'][profile_tag] ) ) - rulearray = rules['device'][instance_handle][template_name] + rulearray = rules['device'][profile_tag][template_name] else: rulearray = rules['global'][template_name] @@ -282,13 +282,13 @@ class PushRuleRestServlet(ClientV1RestServlet): if path[0] == '': defer.returnValue((200, rules['device'])) - instance_handle = path[0] + profile_tag = path[0] path = path[1:] - if instance_handle not in rules['device']: + if profile_tag not in rules['device']: ret = {} ret = _add_empty_priority_class_arrays(ret) defer.returnValue((200, ret)) - ruleset = rules['device'][instance_handle] + ruleset = rules['device'][profile_tag] result = _filter_ruleset_with_path(ruleset, path) defer.returnValue((200, result)) else: @@ -304,14 +304,14 @@ def _add_empty_priority_class_arrays(d): return d -def _instance_handle_from_conditions(conditions): +def _profile_tag_from_conditions(conditions): """ Given a list of conditions, return the instance handle of the device rule if there is one """ for c in conditions: if c['kind'] == 'device': - return c['instance_handle'] + return c['profile_tag'] return None diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 353a4a6589..e10d2576d2 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -41,7 +41,7 @@ class PusherRestServlet(ClientV1RestServlet): ) defer.returnValue((200, {})) - reqd = ['instance_handle', 'kind', 'app_id', 'app_display_name', + reqd = ['profile_tag', 'kind', 'app_id', 'app_display_name', 'device_display_name', 'pushkey', 'lang', 'data'] missing = [] for i in reqd: @@ -54,7 +54,7 @@ class PusherRestServlet(ClientV1RestServlet): try: yield pusher_pool.add_pusher( user_name=user.to_string(), - instance_handle=content['instance_handle'], + profile_tag=content['profile_tag'], kind=content['kind'], app_id=content['app_id'], app_display_name=content['app_display_name'], diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index f253c9e2c3..e2a662a6c7 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -29,7 +29,7 @@ class PusherStore(SQLBaseStore): @defer.inlineCallbacks def get_pushers_by_app_id_and_pushkey(self, app_id_and_pushkey): sql = ( - "SELECT id, user_name, kind, instance_handle, app_id," + "SELECT id, user_name, kind, profile_tag, app_id," "app_display_name, device_display_name, pushkey, ts, data, " "last_token, last_success, failing_since " "FROM pushers " @@ -45,7 +45,7 @@ class PusherStore(SQLBaseStore): "id": r[0], "user_name": r[1], "kind": r[2], - "instance_handle": r[3], + "profile_tag": r[3], "app_id": r[4], "app_display_name": r[5], "device_display_name": r[6], @@ -64,7 +64,7 @@ class PusherStore(SQLBaseStore): @defer.inlineCallbacks def get_all_pushers(self): sql = ( - "SELECT id, user_name, kind, instance_handle, app_id," + "SELECT id, user_name, kind, profile_tag, app_id," "app_display_name, device_display_name, pushkey, ts, data, " "last_token, last_success, failing_since " "FROM pushers" @@ -77,7 +77,7 @@ class PusherStore(SQLBaseStore): "id": r[0], "user_name": r[1], "kind": r[2], - "instance_handle": r[3], + "profile_tag": r[3], "app_id": r[4], "app_display_name": r[5], "device_display_name": r[6], @@ -94,7 +94,7 @@ class 
PusherStore(SQLBaseStore): defer.returnValue(ret) @defer.inlineCallbacks - def add_pusher(self, user_name, instance_handle, kind, app_id, + def add_pusher(self, user_name, profile_tag, kind, app_id, app_display_name, device_display_name, pushkey, pushkey_ts, lang, data): try: @@ -107,7 +107,7 @@ class PusherStore(SQLBaseStore): dict( user_name=user_name, kind=kind, - instance_handle=instance_handle, + profile_tag=profile_tag, app_display_name=app_display_name, device_display_name=device_display_name, ts=pushkey_ts, @@ -158,7 +158,7 @@ class PushersTable(Table): "id", "user_name", "kind", - "instance_handle", + "profile_tag", "app_id", "app_display_name", "device_display_name", diff --git a/synapse/storage/schema/delta/v12.sql b/synapse/storage/schema/delta/v12.sql index a6867cba62..16c2258ca4 100644 --- a/synapse/storage/schema/delta/v12.sql +++ b/synapse/storage/schema/delta/v12.sql @@ -24,7 +24,7 @@ CREATE TABLE IF NOT EXISTS pushers ( id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT NOT NULL, - instance_handle varchar(32) NOT NULL, + profile_tag varchar(32) NOT NULL, kind varchar(8) NOT NULL, app_id varchar(64) NOT NULL, app_display_name varchar(64) NOT NULL, diff --git a/synapse/storage/schema/pusher.sql b/synapse/storage/schema/pusher.sql index 8c4dfd5c1b..3735b11547 100644 --- a/synapse/storage/schema/pusher.sql +++ b/synapse/storage/schema/pusher.sql @@ -16,7 +16,7 @@ CREATE TABLE IF NOT EXISTS pushers ( id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT NOT NULL, - instance_handle varchar(32) NOT NULL, + profile_tag varchar(32) NOT NULL, kind varchar(8) NOT NULL, app_id varchar(64) NOT NULL, app_display_name varchar(64) NOT NULL, -- cgit 1.5.1 From 02be8da5e11d9abcfc962f962bbc4e9940b69199 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 3 Feb 2015 17:34:07 +0000 Subject: Add doc to get_event --- synapse/storage/__init__.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 93aefe0c48..93ab26fcd1 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -131,6 +131,21 @@ class DataStore(RoomMemberStore, RoomStore, def get_event(self, event_id, check_redacted=True, get_prev_content=False, allow_rejected=False, allow_none=False): + """Get an event from the database by event_id. + + Args: + event_id (str): The event_id of the event to fetch + check_redacted (bool): If True, check if event has been redacted + and redact it. + get_prev_content (bool): If True and event is a state event, + include the previous states content in the unsigned field. + allow_rejected (bool): If True return rejected events. + allow_none (bool): If True, return None if no event found, if + False throw an exception. + + Returns: + Deferred : A FrozenEvent. + """ event = yield self.runInteraction( "get_event", self._get_event_txn, event_id, -- cgit 1.5.1 From c0462dbf1533f285f632dcb0a74c0ef0c3e2475b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Feb 2015 10:16:51 +0000 Subject: Rearrange persist_event so that we do all the queries that need to be done before returning early if we have already persisted that event.
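
Before the diff, the shape of the change in miniature: bookkeeping that must run on every call
is hoisted above the "have we already persisted this?" early return, because an earlier call may
have stored the event under different circumstances (for instance as an outlier, when state and
extremity updates were skipped). A minimal self-contained sketch, with stub helpers standing in
for the real queries rearranged in the diff that follows:

def _update_current_state(txn, event): pass   # stand-ins for the real
def _update_extremities(txn, event): pass     # queries moved around in
def _handle_prev_events(txn, event): pass     # the diff below
def _insert_event_rows(txn, event): pass

def persist_event_sketch(txn, event, have_persisted):
    # Phase 1: must run on *every* call, even for already-known events.
    _update_current_state(txn, event)
    _update_extremities(txn, event)
    _handle_prev_events(txn, event)
    # Phase 2: pure inserts, safe to skip once the event row exists.
    if have_persisted:
        return
    _insert_event_rows(txn, event)
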
--- synapse/events/__init__.py | 2 +- synapse/handlers/federation.py | 2 + synapse/storage/__init__.py | 145 +++++++++++++++++++++-------------------- 3 files changed, 77 insertions(+), 72 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index bf07951027..8f0c6e959f 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -77,7 +77,7 @@ class EventBase(object): return self.content["membership"] def is_state(self): - return hasattr(self, "state_key") + return hasattr(self, "state_key") and self.state_key is not None def get_dict(self): d = dict(self._event_dict) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 86953bf8c8..0876589e31 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -515,6 +515,8 @@ class FederationHandler(BaseHandler): "Failed to get destination from event %s", s.event_id ) + destinations.remove(origin) + logger.debug( "on_send_join_request: Sending event: %s, signatures: %s", event.event_id, diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 93ab26fcd1..30ce378900 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -163,19 +163,70 @@ class DataStore(RoomMemberStore, RoomStore, def _persist_event_txn(self, txn, event, context, backfilled, stream_ordering=None, is_new_state=True, current_state=None): - if event.type == EventTypes.Member: - self._store_room_member_txn(txn, event) - elif event.type == EventTypes.Feedback: - self._store_feedback_txn(txn, event) - elif event.type == EventTypes.Name: - self._store_room_name_txn(txn, event) - elif event.type == EventTypes.Topic: - self._store_room_topic_txn(txn, event) - elif event.type == EventTypes.Redaction: - self._store_redaction(txn, event) + + # We purposefully do this first since if we include a `current_state` + # key, we *want* to update the `current_state_events` table + if current_state: + txn.execute( + "DELETE FROM current_state_events WHERE room_id = ?", + (event.room_id,) + ) + + for s in current_state: + self._simple_insert_txn( + txn, + "current_state_events", + { + "event_id": s.event_id, + "room_id": s.room_id, + "type": s.type, + "state_key": s.state_key, + }, + or_replace=True, + ) + + if event.is_state() and is_new_state: + if not backfilled and not context.rejected: + self._simple_insert_txn( + txn, + table="state_forward_extremities", + values={ + "event_id": event.event_id, + "room_id": event.room_id, + "type": event.type, + "state_key": event.state_key, + }, + or_replace=True, + ) + + for prev_state_id, _ in event.prev_state: + self._simple_delete_txn( + txn, + table="state_forward_extremities", + keyvalues={ + "event_id": prev_state_id, + } + ) outlier = event.internal_metadata.is_outlier() + if not outlier: + self._store_state_groups_txn(txn, event, context) + + self._update_min_depth_for_room_txn( + txn, + event.room_id, + event.depth + ) + + self._handle_prev_events( + txn, + outlier=outlier, + event_id=event.event_id, + prev_events=event.prev_events, + room_id=event.room_id, + ) + have_persisted = self._simple_select_one_onecol_txn( txn, table="event_json", @@ -209,6 +260,17 @@ class DataStore(RoomMemberStore, RoomStore, ) return + if event.type == EventTypes.Member: + self._store_room_member_txn(txn, event) + elif event.type == EventTypes.Feedback: + self._store_feedback_txn(txn, event) + elif event.type == EventTypes.Name: + self._store_room_name_txn(txn, event) + elif event.type == 
EventTypes.Topic: + self._store_room_topic_txn(txn, event) + elif event.type == EventTypes.Redaction: + self._store_redaction(txn, event) + event_dict = { k: v for k, v in event.get_dict().items() @@ -273,41 +335,10 @@ class DataStore(RoomMemberStore, RoomStore, ) raise _RollbackButIsFineException("_persist_event") - self._handle_prev_events( - txn, - outlier=outlier, - event_id=event.event_id, - prev_events=event.prev_events, - room_id=event.room_id, - ) - - if not outlier: - self._store_state_groups_txn(txn, event, context) - if context.rejected: self._store_rejections_txn(txn, event.event_id, context.rejected) - if current_state: - txn.execute( - "DELETE FROM current_state_events WHERE room_id = ?", - (event.room_id,) - ) - - for s in current_state: - self._simple_insert_txn( - txn, - "current_state_events", - { - "event_id": s.event_id, - "room_id": s.room_id, - "type": s.type, - "state_key": s.state_key, - }, - or_replace=True, - ) - - is_state = hasattr(event, "state_key") and event.state_key is not None - if is_state: + if event.is_state(): vals = { "event_id": event.event_id, "room_id": event.room_id, @@ -315,6 +346,7 @@ class DataStore(RoomMemberStore, RoomStore, "state_key": event.state_key, } + # TODO: How does this work with backfilling? if hasattr(event, "replaces_state"): vals["prev_state"] = event.replaces_state @@ -351,28 +383,6 @@ class DataStore(RoomMemberStore, RoomStore, or_ignore=True, ) - if not backfilled and not context.rejected: - self._simple_insert_txn( - txn, - table="state_forward_extremities", - values={ - "event_id": event.event_id, - "room_id": event.room_id, - "type": event.type, - "state_key": event.state_key, - }, - or_replace=True, - ) - - for prev_state_id, _ in event.prev_state: - self._simple_delete_txn( - txn, - table="state_forward_extremities", - keyvalues={ - "event_id": prev_state_id, - } - ) - for hash_alg, hash_base64 in event.hashes.items(): hash_bytes = decode_base64(hash_base64) self._store_event_content_hash_txn( @@ -403,13 +413,6 @@ class DataStore(RoomMemberStore, RoomStore, txn, event.event_id, ref_alg, ref_hash_bytes ) - if not outlier: - self._update_min_depth_for_room_txn( - txn, - event.room_id, - event.depth - ) - def _store_redaction(self, txn, event): txn.execute( "INSERT OR IGNORE INTO redactions " -- cgit 1.5.1 From 03d415a6a23300e36b5e6c35080ac4dd8ab06815 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Feb 2015 10:40:59 +0000 Subject: Brief comment on why we do some things on every call to persist_event and not others --- synapse/storage/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 30ce378900..a63c59a8a2 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -239,6 +239,12 @@ class DataStore(RoomMemberStore, RoomStore, event.internal_metadata.get_dict() ) + # If we have already persisted this event, we don't need to do any + # more processing. + # The processing above must be done on every call to persist event, + # since they might not have happened on previous calls. For example, + # if we are persisting an event that we had persisted as an outlier, + # but is no longer one. if have_persisted: if not outlier: sql = ( -- cgit 1.5.1 From 17753f0c20d0d8190095c5a3183630b78bf9650c Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Wed, 4 Feb 2015 11:19:18 +0000 Subject: Add stub ApplicationServiceApi and glue it with the handler. 
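
The handler glue in this commit turns "which AS knows about this user or alias?" into a
sequential fan-out over the matching services, stopping at the first positive answer. Reduced
to a sketch (the standalone function is hypothetical; the real calls are the query_user and
query_alias stubs added below, which carry the same "this needs to block" caveat as a TODO):

def first_service_knowing_user(api, services, user_id):
    for service in services:
        # Blocks on each AS in turn before moving to the next.
        if api.query_user(service, user_id):
            return service  # the user exists now, don't query more ASes
    return None
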
--- synapse/appservice/__init__.py | 3 ++- synapse/appservice/api.py | 21 +++++++++++++++++++++ synapse/handlers/appservice.py | 18 +++++++++++++++--- synapse/storage/appservice.py | 1 + 4 files changed, 39 insertions(+), 4 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index f801fb5324..92f64619c9 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -31,10 +31,11 @@ class ApplicationService(object): # values. NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS] - def __init__(self, token, url=None, namespaces=None): + def __init__(self, token, url=None, namespaces=None, txn_id=None): self.token = token self.url = url self.namespaces = self._check_namespaces(namespaces) + self.txn_id = None def _check_namespaces(self, namespaces): # Sanity check that it is of the form: diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 803f97ea4f..158aded66e 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -13,3 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. + +class ApplicationServiceApi(object): + """This class manages HS -> AS communications, including querying and + pushing. + """ + + def __init__(self, hs): + self.hs_token = "_hs_token_" # TODO extract hs token + + def query_user(self, service, user_id): + pass + + def query_alias(self, service, alias): + pass + + def push_bulk(self, service, events): + pass + + def push(self, service, event): + pass + diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index f05b57bcb9..9cdeaa2d94 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -18,6 +18,7 @@ from twisted.internet import defer from ._base import BaseHandler from synapse.api.errors import Codes, StoreError, SynapseError from synapse.appservice import ApplicationService +from synapse.appservice.api import ApplicationServiceApi import logging @@ -29,6 +30,7 @@ class ApplicationServicesHandler(BaseHandler): def __init__(self, hs): super(ApplicationServicesHandler, self).__init__(hs) + self.appservice_api = ApplicationServiceApi(hs) @defer.inlineCallbacks def register(self, app_service): @@ -97,7 +99,12 @@ class ApplicationServicesHandler(BaseHandler): ) for user_service in user_query_services: # this needs to block XXX: Need to feed response back to caller - pass # TODO poke User Query API + is_known_user = self.appservice_api.query_user( + user_service, event + ) + if is_known_user: + # the user exists now,so don't query more ASes. + break # Do we know this room alias exists? If not, poke the room alias query # API for all services which match that room alias regex. @@ -109,8 +116,13 @@ class ApplicationServicesHandler(BaseHandler): ) for alias_service in alias_query_services: # this needs to block XXX: Need to feed response back to caller - pass # TODO poke Room Alias Query API + is_known_alias = self.appservice_api.query_alias( + alias_service, event + ) + if is_known_alias: + # the alias exists now so don't query more ASes. 
+ break # Fork off pushes to these services - XXX First cut, best effort for service in services: - pass # TODO push event to service + self.appservice_api.push(service, event) diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 48bc7e0fe6..abb617f049 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -216,6 +216,7 @@ class ApplicationServiceStore(SQLBaseStore): except IndexError: logger.error("Bad namespace enum '%s'. %s", ns_int, res) + # TODO get last successful txn id f.e. service for service in services.values(): logger.info("Found application service: %s", service) self.cache.services.append(ApplicationService( -- cgit 1.5.1 From 89f2e8fbdf7965d02426ef17ca6a9490219a2ec4 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Wed, 4 Feb 2015 15:21:03 +0000 Subject: Fix bug in store defer. Add more unit tests. --- synapse/storage/appservice.py | 18 +++++-- tests/appservice/test_appservice.py | 87 ++++++++++++++++++++++++++++++ tests/handlers/test_appservice.py | 6 +-- tests/storage/test_appservice.py | 105 ++++++++++++++++++++++++++++++++++++ 4 files changed, 207 insertions(+), 9 deletions(-) create mode 100644 tests/storage/test_appservice.py (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index abb617f049..b64416de28 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -40,7 +40,7 @@ class ApplicationServiceStore(SQLBaseStore): def __init__(self, hs): super(ApplicationServiceStore, self).__init__(hs) self.cache = ApplicationServiceCache() - self._populate_cache() + self.cache_defer = self._populate_cache() @defer.inlineCallbacks def unregister_app_service(self, token): @@ -49,6 +49,7 @@ class ApplicationServiceStore(SQLBaseStore): This removes all AS specific regex and the base URL. The token is the only thing preserved for future registration attempts. """ + yield self.cache_defer # make sure the cache is ready yield self.runInteraction( "unregister_app_service", self._unregister_app_service_txn, @@ -89,9 +90,13 @@ class ApplicationServiceStore(SQLBaseStore): Args: service(ApplicationService): The updated service. """ + yield self.cache_defer # make sure the cache is ready + # NB: There is no "insert" since we provide no public-facing API to # allocate new ASes. It relies on the server admin inserting the AS # token into the database manually. + + if not service.token or not service.url: raise StoreError(400, "Token and url must be specified.") @@ -148,9 +153,12 @@ class ApplicationServiceStore(SQLBaseStore): if res: return res[0] + @defer.inlineCallbacks def get_app_services(self): - return self.cache.services + yield self.cache_defer # make sure the cache is ready + defer.returnValue(self.cache.services) + @defer.inlineCallbacks def get_app_service_by_token(self, token, from_cache=True): """Get the application service with the given token. @@ -161,12 +169,14 @@ class ApplicationServiceStore(SQLBaseStore): Raises: StoreError if there was a problem retrieving this service. """ + yield self.cache_defer # make sure the cache is ready if from_cache: for service in self.cache.services: if service.token == token: - return service - return None + defer.returnValue(service) + return + defer.returnValue(None) # TODO: The from_cache=False impl # TODO: This should be JOINed with the application_services_regex table. 
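
The store fix above is a stock Twisted idiom: keep the Deferred returned by the one-off cache
load and have every public accessor yield it before touching the cache, so callers that arrive
before the initial database read has finished simply wait. A minimal self-contained sketch of
the idiom (class and names are illustrative, not the real store):

from twisted.internet import defer

class DeferredCacheSketch(object):
    def __init__(self):
        self._cache = []
        # Fires exactly once, when the initial load completes; kept around
        # so later calls can wait on it (or pass straight through).
        self._cache_ready = self._populate()

    @defer.inlineCallbacks
    def _populate(self):
        rows = yield defer.succeed(["svc1", "svc2"])  # stands in for a DB read
        self._cache.extend(rows)

    @defer.inlineCallbacks
    def get_all(self):
        yield self._cache_ready  # make sure the cache is ready
        defer.returnValue(list(self._cache))
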
diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index 5cfd26daa6..c0aaf12785 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -56,3 +56,90 @@ class ApplicationServiceTestCase(unittest.TestCase): self.event.type = "m.room.member" self.event.state_key = "@irc_foobar:matrix.org" self.assertTrue(self.service.is_interested(self.event)) + + def test_regex_room_id_match(self): + self.service.namespaces[ApplicationService.NS_ROOMS].append( + "!some_prefix.*some_suffix:matrix.org" + ) + self.event.room_id = "!some_prefixs0m3th1nGsome_suffix:matrix.org" + self.assertTrue(self.service.is_interested(self.event)) + + def test_regex_room_id_no_match(self): + self.service.namespaces[ApplicationService.NS_ROOMS].append( + "!some_prefix.*some_suffix:matrix.org" + ) + self.event.room_id = "!XqBunHwQIXUiqCaoxq:matrix.org" + self.assertFalse(self.service.is_interested(self.event)) + + def test_regex_alias_match(self): + self.service.namespaces[ApplicationService.NS_ALIASES].append( + "#irc_.*:matrix.org" + ) + self.assertTrue(self.service.is_interested( + self.event, + aliases_for_event=["#irc_foobar:matrix.org", "#athing:matrix.org"] + )) + + def test_regex_alias_no_match(self): + self.service.namespaces[ApplicationService.NS_ALIASES].append( + "#irc_.*:matrix.org" + ) + self.assertFalse(self.service.is_interested( + self.event, + aliases_for_event=["#xmpp_foobar:matrix.org", "#athing:matrix.org"] + )) + + def test_regex_multiple_matches(self): + self.service.namespaces[ApplicationService.NS_ALIASES].append( + "#irc_.*:matrix.org" + ) + self.service.namespaces[ApplicationService.NS_USERS].append( + "@irc_.*" + ) + self.event.sender = "@irc_foobar:matrix.org" + self.assertTrue(self.service.is_interested( + self.event, + aliases_for_event=["#irc_barfoo:matrix.org"] + )) + + def test_restrict_to_rooms(self): + self.service.namespaces[ApplicationService.NS_ROOMS].append( + "!flibble_.*:matrix.org" + ) + self.service.namespaces[ApplicationService.NS_USERS].append( + "@irc_.*" + ) + self.event.sender = "@irc_foobar:matrix.org" + self.event.room_id = "!wibblewoo:matrix.org" + self.assertFalse(self.service.is_interested( + self.event, + restrict_to=ApplicationService.NS_ROOMS + )) + + def test_restrict_to_aliases(self): + self.service.namespaces[ApplicationService.NS_ALIASES].append( + "#xmpp_.*:matrix.org" + ) + self.service.namespaces[ApplicationService.NS_USERS].append( + "@irc_.*" + ) + self.event.sender = "@irc_foobar:matrix.org" + self.assertFalse(self.service.is_interested( + self.event, + restrict_to=ApplicationService.NS_ALIASES, + aliases_for_event=["#irc_barfoo:matrix.org"] + )) + + def test_restrict_to_senders(self): + self.service.namespaces[ApplicationService.NS_ALIASES].append( + "#xmpp_.*:matrix.org" + ) + self.service.namespaces[ApplicationService.NS_USERS].append( + "@irc_.*" + ) + self.event.sender = "@xmpp_foobar:matrix.org" + self.assertFalse(self.service.is_interested( + self.event, + restrict_to=ApplicationService.NS_USERS, + aliases_for_event=["#xmpp_barfoo:matrix.org"] + )) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 9c464e7fbc..1daa314f20 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -18,12 +18,8 @@ from .. import unittest from synapse.handlers.appservice import ApplicationServicesHandler -from collections import namedtuple from mock import Mock -# TODO: Should this be a more general thing? 
tests/api/test_filtering.py uses it -MockEvent = namedtuple("MockEvent", "sender type room_id") - class AppServiceHandlerTestCase(unittest.TestCase): """ Tests the ApplicationServicesHandler. """ @@ -51,7 +47,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): self.mock_store.get_app_services = Mock(return_value=services) - event = MockEvent( + event = Mock( sender="@someone:anywhere", type="m.room.message", room_id="!foo:bar" diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py new file mode 100644 index 0000000000..56fdda377c --- /dev/null +++ b/tests/storage/test_appservice.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from tests import unittest +from twisted.internet import defer + +from synapse.appservice import ApplicationService +from synapse.server import HomeServer +from synapse.storage.appservice import ApplicationServiceStore + +from tests.utils import SQLiteMemoryDbPool, MockClock + + +class ApplicationServiceStoreTestCase(unittest.TestCase): + + @defer.inlineCallbacks + def setUp(self): + db_pool = SQLiteMemoryDbPool() + yield db_pool.prepare() + hs = HomeServer("test", db_pool=db_pool, clock=MockClock()) + self.as_token = "token1" + db_pool.runQuery( + "INSERT INTO application_services(token) VALUES(?)", + (self.as_token,) + ) + db_pool.runQuery( + "INSERT INTO application_services(token) VALUES(?)", ("token2",) + ) + db_pool.runQuery( + "INSERT INTO application_services(token) VALUES(?)", ("token3",) + ) + # must be done after inserts + self.store = ApplicationServiceStore(hs) + + @defer.inlineCallbacks + def test_update_and_retrieval_of_service(self): + url = "https://matrix.org/appservices/foobar" + user_regex = ["@foobar_.*:matrix.org"] + alias_regex = ["#foobar_.*:matrix.org"] + room_regex = [] + service = ApplicationService(url=url, token=self.as_token, namespaces={ + ApplicationService.NS_USERS: user_regex, + ApplicationService.NS_ALIASES: alias_regex, + ApplicationService.NS_ROOMS: room_regex + }) + yield self.store.update_app_service(service) + + stored_service = yield self.store.get_app_service_by_token( + self.as_token + ) + self.assertEquals(stored_service.token, self.as_token) + self.assertEquals(stored_service.url, url) + self.assertEquals( + stored_service.namespaces[ApplicationService.NS_ALIASES], + alias_regex + ) + self.assertEquals( + stored_service.namespaces[ApplicationService.NS_ROOMS], + room_regex + ) + self.assertEquals( + stored_service.namespaces[ApplicationService.NS_USERS], + user_regex + ) + + @defer.inlineCallbacks + def test_retrieve_unknown_service_token(self): + service = yield self.store.get_app_service_by_token("invalid_token") + self.assertEquals(service, None) + + @defer.inlineCallbacks + def test_retrieval_of_service(self): + stored_service = yield self.store.get_app_service_by_token( + self.as_token + ) + self.assertEquals(stored_service.token, self.as_token) + self.assertEquals(stored_service.url, None) + self.assertEquals( + 
stored_service.namespaces[ApplicationService.NS_ALIASES], + [] + ) + self.assertEquals( + stored_service.namespaces[ApplicationService.NS_ROOMS], + [] + ) + self.assertEquals( + stored_service.namespaces[ApplicationService.NS_USERS], + [] + ) + + @defer.inlineCallbacks + def test_retrieval_of_all_services(self): + services = yield self.store.get_app_services() + self.assertEquals(len(services), 3) -- cgit 1.5.1 From 27091f146a0ebdbfe1ae7c5cd30de51515cfbebc Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Thu, 5 Feb 2015 10:08:12 +0000 Subject: Add hs_token column and generate a different token f.e application service. --- synapse/appservice/__init__.py | 6 ++++-- synapse/appservice/api.py | 8 ++++---- synapse/handlers/appservice.py | 9 ++++++--- synapse/rest/appservice/v1/register.py | 4 ++-- synapse/storage/appservice.py | 17 ++++++++++++----- synapse/storage/schema/application_services.sql | 1 + tests/storage/test_appservice.py | 10 ++++++---- 7 files changed, 35 insertions(+), 20 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 0c7f58574e..f7baf578f0 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -34,11 +34,13 @@ class ApplicationService(object): # values. NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS] - def __init__(self, token, url=None, namespaces=None, txn_id=None): + def __init__(self, token, url=None, namespaces=None, hs_token=None, + txn_id=None): self.token = token self.url = url + self.hs_token = hs_token self.namespaces = self._check_namespaces(namespaces) - self.txn_id = None + self.txn_id = txn_id def _check_namespaces(self, namespaces): # Sanity check that it is of the form: diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index fbf4abc526..29bb35d61b 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -30,7 +30,6 @@ class ApplicationServiceApi(SimpleHttpClient): def __init__(self, hs): super(ApplicationServiceApi, self).__init__(hs) - self.hs_token = "_hs_token_" # TODO extract hs token @defer.inlineCallbacks def query_user(self, service, user_id): @@ -38,7 +37,7 @@ class ApplicationServiceApi(SimpleHttpClient): response = None try: response = yield self.get_json(uri, { - "access_token": self.hs_token + "access_token": service.hs_token }) if response: # just an empty json object defer.returnValue(True) @@ -54,7 +53,7 @@ class ApplicationServiceApi(SimpleHttpClient): response = None try: response = yield self.get_json(uri, { - "access_token": self.hs_token + "access_token": service.hs_token }) if response: # just an empty json object defer.returnValue(True) @@ -76,9 +75,10 @@ class ApplicationServiceApi(SimpleHttpClient): "events": events }, { - "access_token": self.hs_token + "access_token": service.hs_token }) if response: # just an empty json object + # TODO: Mark txn as sent successfully defer.returnValue(True) except CodeMessageException as e: logger.warning("push_bulk to %s received %s", uri, e.code) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 2b2761682f..7b0599c71e 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -19,6 +19,7 @@ from ._base import BaseHandler from synapse.api.errors import Codes, StoreError, SynapseError from synapse.appservice import ApplicationService from synapse.appservice.api import ApplicationServiceApi +import synapse.util.stringutils as stringutils import logging @@ -53,10 +54,9 @@ class 
ApplicationServicesHandler(object): errcode=Codes.FORBIDDEN ) logger.info("Updating application service info...") + app_service.hs_token = self._generate_hs_token() yield self.store.update_app_service(app_service) - - logger.info("Sending ping to %s...", app_service.url) - yield self.appservice_api.push(app_service, "pinger") + defer.returnValue(app_service) def unregister(self, token): logger.info("Unregister as_token=%s", token) @@ -136,3 +136,6 @@ class ApplicationServicesHandler(object): # Fork off pushes to these services - XXX First cut, best effort for service in services: self.appservice_api.push(service, event) + + def _generate_hs_token(self): + return stringutils.random_string(18) diff --git a/synapse/rest/appservice/v1/register.py b/synapse/rest/appservice/v1/register.py index e374d538e7..d3d5aef220 100644 --- a/synapse/rest/appservice/v1/register.py +++ b/synapse/rest/appservice/v1/register.py @@ -61,8 +61,8 @@ class RegisterRestServlet(AppServiceRestServlet): app_service = ApplicationService(as_token, as_url, namespaces) - yield self.handler.register(app_service) - hs_token = "_not_implemented_yet" # TODO: Pull this from self.hs? + app_service = yield self.handler.register(app_service) + hs_token = app_service.hs_token defer.returnValue((200, { "hs_token": hs_token diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index b64416de28..3c8bf9ad0d 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -60,6 +60,7 @@ class ApplicationServiceStore(SQLBaseStore): if service.token == token: service.url = None service.namespaces = None + service.hs_token = None def _unregister_app_service_txn(self, txn, token): # kill the url to prevent pushes @@ -100,6 +101,9 @@ class ApplicationServiceStore(SQLBaseStore): if not service.token or not service.url: raise StoreError(400, "Token and url must be specified.") + if not service.hs_token: + raise StoreError(500, "No HS token") + yield self.runInteraction( "update_app_service", self._update_app_service_txn, @@ -126,8 +130,8 @@ class ApplicationServiceStore(SQLBaseStore): return False txn.execute( - "UPDATE application_services SET url=? WHERE id=?", - (service.url, as_id,) + "UPDATE application_services SET url=?, hs_token=? 
WHERE id=?", + (service.url, service.hs_token, as_id,) ) # cleanup regex txn.execute( @@ -196,6 +200,7 @@ class ApplicationServiceStore(SQLBaseStore): # 'namespace': enum, # 'as_id': 0, # 'token': "something", + # 'hs_token': "otherthing", # 'id': 0 # } # ] @@ -208,6 +213,7 @@ class ApplicationServiceStore(SQLBaseStore): services[as_token] = { "url": res["url"], "token": as_token, + "hs_token": res["hs_token"], "namespaces": { ApplicationService.NS_USERS: [], ApplicationService.NS_ALIASES: [], @@ -230,8 +236,9 @@ class ApplicationServiceStore(SQLBaseStore): for service in services.values(): logger.info("Found application service: %s", service) self.cache.services.append(ApplicationService( - service["token"], - service["url"], - service["namespaces"] + token=service["token"], + url=service["url"], + namespaces=service["namespaces"], + hs_token=service["hs_token"] )) diff --git a/synapse/storage/schema/application_services.sql b/synapse/storage/schema/application_services.sql index 6d245fc807..03b5a10c8a 100644 --- a/synapse/storage/schema/application_services.sql +++ b/synapse/storage/schema/application_services.sql @@ -17,6 +17,7 @@ CREATE TABLE IF NOT EXISTS application_services( id INTEGER PRIMARY KEY AUTOINCREMENT, url TEXT, token TEXT, + hs_token TEXT, UNIQUE(token) ON CONFLICT ROLLBACK ); diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 56fdda377c..b9ecfb3384 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -46,13 +46,15 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def test_update_and_retrieval_of_service(self): url = "https://matrix.org/appservices/foobar" + hs_token = "hstok" user_regex = ["@foobar_.*:matrix.org"] alias_regex = ["#foobar_.*:matrix.org"] room_regex = [] - service = ApplicationService(url=url, token=self.as_token, namespaces={ - ApplicationService.NS_USERS: user_regex, - ApplicationService.NS_ALIASES: alias_regex, - ApplicationService.NS_ROOMS: room_regex + service = ApplicationService( + url=url, hs_token=hs_token, token=self.as_token, namespaces={ + ApplicationService.NS_USERS: user_regex, + ApplicationService.NS_ALIASES: alias_regex, + ApplicationService.NS_ROOMS: room_regex }) yield self.store.update_app_service(service) -- cgit 1.5.1 From aaf50bf6f3d6adee92fa4d5cb55dbf3c5a13dbe3 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 5 Feb 2015 15:11:38 +0000 Subject: Give server default rules the 'default' attribute and fix various brokenness. 
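
For a client, the visible effect of this commit is a new boolean on each rule that came from
the server's base ruleset. Roughly what one tagged rule looks like after make_base_rules has
run (the rule id, condition, and map values here are assumed for illustration, not taken from
the patch):

PRIORITY_CLASS_MAP = {
    'underride': 1, 'sender': 2, 'room': 3, 'content': 4, 'override': 5,
}

rule = {
    'rule_id': '.m.example.rule',
    'conditions': [{'kind': 'contains_display_name'}],
    'actions': ['notify'],
}
rule['priority_class'] = PRIORITY_CLASS_MAP['override']
rule['default'] = True  # marks it as a server default, which the diff
                        # below also surfaces via _rule_to_template
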
--- synapse/push/baserules.py | 1 + synapse/rest/client/v1/push_rule.py | 4 +++- synapse/storage/push_rule.py | 7 +++++-- 3 files changed, 9 insertions(+), 3 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 191909ad4d..8d4b806da6 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -36,6 +36,7 @@ def make_base_rules(user, kind): for r in rules: r['priority_class'] = PRIORITY_CLASS_MAP[kind] + r['default'] = True return rules diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 7ab167ce03..80f116b1ed 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -345,7 +345,7 @@ def _priority_class_to_template_name(pc): def _rule_to_template(rule): unscoped_rule_id = None if 'rule_id' in rule: - _rule_id_from_namespaced(rule['rule_id']) + unscoped_rule_id = _rule_id_from_namespaced(rule['rule_id']) template_name = _priority_class_to_template_name(rule['priority_class']) if template_name in ['override', 'underride']: @@ -364,6 +364,8 @@ def _rule_to_template(rule): if unscoped_rule_id: templaterule['rule_id'] = unscoped_rule_id + if 'default' in rule: + templaterule['default'] = rule['default'] return templaterule diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 27502d2399..30e23445d9 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -176,7 +176,7 @@ class PushRuleStore(SQLBaseStore): txn.execute(sql, new_rule.values()) @defer.inlineCallbacks - def delete_push_rule(self, user_name, rule_id, **kwargs): + def delete_push_rule(self, user_name, rule_id): """ Delete a push rule. Args specify the row to be deleted and can be any of the columns in the push_rule table, but below are the @@ -186,7 +186,10 @@ class PushRuleStore(SQLBaseStore): user_name (str): The matrix ID of the push rule owner rule_id (str): The rule_id of the rule to be deleted """ - yield self._simple_delete_one(PushRuleTable.table_name, kwargs) + yield self._simple_delete_one( + PushRuleTable.table_name, + {'user_name': user_name, 'rule_id': rule_id} + ) class RuleNotFoundException(Exception): -- cgit 1.5.1 From a3c6010718c2749bd446bb63f3cf03bae09b0d20 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Thu, 5 Feb 2015 16:48:57 +0000 Subject: Add delta sql file. --- synapse/storage/schema/delta/v14.sql | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 synapse/storage/schema/delta/v14.sql (limited to 'synapse/storage') diff --git a/synapse/storage/schema/delta/v14.sql b/synapse/storage/schema/delta/v14.sql new file mode 100644 index 0000000000..03b5a10c8a --- /dev/null +++ b/synapse/storage/schema/delta/v14.sql @@ -0,0 +1,33 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE IF NOT EXISTS application_services( + id INTEGER PRIMARY KEY AUTOINCREMENT, + url TEXT, + token TEXT, + hs_token TEXT, + UNIQUE(token) ON CONFLICT ROLLBACK +); + +CREATE TABLE IF NOT EXISTS application_services_regex( + id INTEGER PRIMARY KEY AUTOINCREMENT, + as_id INTEGER NOT NULL, + namespace INTEGER, /* enum[room_id|room_alias|user_id] */ + regex TEXT, + FOREIGN KEY(as_id) REFERENCES application_services(id) +); + + + -- cgit 1.5.1 From 0cd66885e3ff7828282cc03dd8189763fdb7b927 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Feb 2015 14:38:04 +0000 Subject: Move delta/v13.sql to delta/v12.sql --- synapse/storage/schema/delta/v12.sql | 11 +++++++++++ synapse/storage/schema/delta/v13.sql | 24 ------------------------ 2 files changed, 11 insertions(+), 24 deletions(-) delete mode 100644 synapse/storage/schema/delta/v13.sql (limited to 'synapse/storage') diff --git a/synapse/storage/schema/delta/v12.sql b/synapse/storage/schema/delta/v12.sql index 16c2258ca4..302d958dbf 100644 --- a/synapse/storage/schema/delta/v12.sql +++ b/synapse/storage/schema/delta/v12.sql @@ -52,3 +52,14 @@ CREATE TABLE IF NOT EXISTS push_rules ( ); CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name); + +CREATE TABLE IF NOT EXISTS user_filters( + user_id TEXT, + filter_id INTEGER, + filter_json TEXT, + FOREIGN KEY(user_id) REFERENCES users(id) +); + +CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters( + user_id, filter_id +); diff --git a/synapse/storage/schema/delta/v13.sql b/synapse/storage/schema/delta/v13.sql deleted file mode 100644 index beb39ca201..0000000000 --- a/synapse/storage/schema/delta/v13.sql +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS user_filters( - user_id TEXT, - filter_id INTEGER, - filter_json TEXT, - FOREIGN KEY(user_id) REFERENCES users(id) -); - -CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters( - user_id, filter_id -); -- cgit 1.5.1 From ac3183caaa66b750996d90c0ac9ed430f623909c Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 9 Feb 2015 12:03:37 +0000 Subject: Register a user account for the AS when the AS registers. Add 'sender' column to AS table. 
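
The registration path now does three things in order: mint an hs_token for HS -> AS calls,
register a dedicated "sender" account the AS acts as by default, and persist both on the AS
row (hence the new 'sender' column). A compact sketch with hypothetical stand-ins for the
handler's collaborators:

import random
import string

def register_app_service_sketch(app_service, register_account, update_store):
    # 1. fresh token the HS will present when pushing to the AS
    app_service.hs_token = ''.join(
        random.choice(string.ascii_letters) for _ in range(18)
    )
    # 2. a user id for the AS itself, e.g. "@_some_as:server" (assumed form)
    app_service.sender = register_account()
    # 3. store token and sender alongside the AS registration
    update_store(app_service)
    return app_service
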
--- synapse/appservice/__init__.py | 3 ++- synapse/handlers/appservice.py | 8 +++++++- synapse/storage/appservice.py | 5 +++-- synapse/storage/schema/application_services.sql | 1 + synapse/storage/schema/delta/v14.sql | 1 + 5 files changed, 14 insertions(+), 4 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 46d46a5a48..fb9bfffe5d 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -35,10 +35,11 @@ class ApplicationService(object): NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS] def __init__(self, token, url=None, namespaces=None, hs_token=None, - txn_id=None): + sender=None, txn_id=None): self.token = token self.url = url self.hs_token = hs_token + self.sender = sender self.namespaces = self._check_namespaces(namespaces) self.txn_id = txn_id diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index fa810b9a98..5071a12eb1 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -52,8 +52,14 @@ class ApplicationServicesHandler(object): "Consult the home server admin.", errcode=Codes.FORBIDDEN ) - logger.info("Updating application service info...") + app_service.hs_token = self._generate_hs_token() + + # create a sender for this application service which is used when + # creating rooms, etc.. + account = yield self.hs.get_handlers().registration_handler.register() + app_service.sender = account[0] + yield self.store.update_app_service(app_service) defer.returnValue(app_service) diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 3c8bf9ad0d..eef77e737e 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -130,8 +130,9 @@ class ApplicationServiceStore(SQLBaseStore): return False txn.execute( - "UPDATE application_services SET url=?, hs_token=? WHERE id=?", - (service.url, service.hs_token, as_id,) + "UPDATE application_services SET url=?, hs_token=?, sender=? " + "WHERE id=?", + (service.url, service.hs_token, service.sender, as_id,) ) # cleanup regex txn.execute( diff --git a/synapse/storage/schema/application_services.sql b/synapse/storage/schema/application_services.sql index 03b5a10c8a..e491ad5aec 100644 --- a/synapse/storage/schema/application_services.sql +++ b/synapse/storage/schema/application_services.sql @@ -18,6 +18,7 @@ CREATE TABLE IF NOT EXISTS application_services( url TEXT, token TEXT, hs_token TEXT, + sender TEXT, UNIQUE(token) ON CONFLICT ROLLBACK ); diff --git a/synapse/storage/schema/delta/v14.sql b/synapse/storage/schema/delta/v14.sql index 03b5a10c8a..e491ad5aec 100644 --- a/synapse/storage/schema/delta/v14.sql +++ b/synapse/storage/schema/delta/v14.sql @@ -18,6 +18,7 @@ CREATE TABLE IF NOT EXISTS application_services( url TEXT, token TEXT, hs_token TEXT, + sender TEXT, UNIQUE(token) ON CONFLICT ROLLBACK ); -- cgit 1.5.1 From 5a7dd058184613c70041a61fdbc2ccce104bb500 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 9 Feb 2015 14:14:15 +0000 Subject: Modify auth.get_user_by_req for authing appservices directly. Add logic to map the appservice token to the autogenned appservice user ID. 
Add unit tests for all forms of get_user_by_req (user/appservice, valid/bad/missing tokens) --- synapse/api/auth.py | 34 ++++---- synapse/storage/appservice.py | 4 +- tests/api/test_auth.py | 139 +++++++++++++++++++++++++++++++++ tests/rest/client/v1/test_presence.py | 3 + tests/rest/client/v2_alpha/__init__.py | 4 +- 5 files changed, 164 insertions(+), 20 deletions(-) create mode 100644 tests/api/test_auth.py (limited to 'synapse/storage') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index ea8c461729..310a428066 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -302,27 +302,26 @@ class Auth(object): # Check for application service tokens with a user_id override try: - if "user_id" not in request.args: - # This has to be done like this rather than relying on it - # natively throwing because tests use a Mock for the request - # object which doesn't throw :/ - raise KeyError - - masquerade_user_id = request.args["user_id"][0] app_service = yield self.store.get_app_service_by_token( access_token ) if not app_service: - raise AuthError( - 403, "Invalid application service access token" - ) - if not app_service.is_interested_in_user(masquerade_user_id): - raise AuthError( - 403, - "Application service cannot masquerade as this user." - ) + raise KeyError + + user_id = app_service.sender + if "user_id" in request.args: + user_id = request.args["user_id"][0] + if not app_service.is_interested_in_user(user_id): + raise AuthError( + 403, + "Application service cannot masquerade as this user." + ) + + if not user_id: + raise KeyError + defer.returnValue( - (UserID.from_string(masquerade_user_id), ClientInfo("", "")) + (UserID.from_string(user_id), ClientInfo("", "")) ) return except KeyError: @@ -366,8 +365,7 @@ class Auth(object): try: ret = yield self.store.get_user_by_token(token=token) if not ret: - raise StoreError() - + raise StoreError(400, "Unknown token") user_info = { "admin": bool(ret.get("admin", False)), "device_id": ret.get("device_id"), diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index eef77e737e..ba31c68595 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -215,6 +215,7 @@ class ApplicationServiceStore(SQLBaseStore): "url": res["url"], "token": as_token, "hs_token": res["hs_token"], + "sender": res["sender"], "namespaces": { ApplicationService.NS_USERS: [], ApplicationService.NS_ALIASES: [], @@ -240,6 +241,7 @@ class ApplicationServiceStore(SQLBaseStore): token=service["token"], url=service["url"], namespaces=service["namespaces"], - hs_token=service["hs_token"] + hs_token=service["hs_token"], + sender=service["sender"] )) diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py new file mode 100644 index 0000000000..1d8367ce42 --- /dev/null +++ b/tests/api/test_auth.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from tests import unittest +from twisted.internet import defer + +from mock import Mock, NonCallableMock + +from synapse.api.auth import Auth +from synapse.api.errors import AuthError +from synapse.types import UserID + +class AuthTestCase(unittest.TestCase): + + def setUp(self): + self.state_handler = Mock() + self.store = Mock() + + self.hs = Mock() + self.hs.get_datastore = Mock(return_value=self.store) + self.hs.get_state_handler = Mock(return_value=self.state_handler) + self.auth = Auth(self.hs) + + self.test_user = "@foo:bar" + self.test_token = "_test_token_" + + @defer.inlineCallbacks + def test_get_user_by_req_user_valid_token(self): + self.store.get_app_service_by_token = Mock(return_value=None) + user_info = { + "name": self.test_user, + "device_id": "nothing", + "token_id": "ditto", + "admin": False + } + self.store.get_user_by_token = Mock(return_value=user_info) + + request = Mock(args={}) + request.args["access_token"] = [self.test_token] + request.requestHeaders.getRawHeaders = Mock(return_value=[""]) + (user, info) = yield self.auth.get_user_by_req(request) + self.assertEquals(user.to_string(), self.test_user) + + def test_get_user_by_req_user_bad_token(self): + self.store.get_app_service_by_token = Mock(return_value=None) + self.store.get_user_by_token = Mock(return_value=None) + + request = Mock(args={}) + request.args["access_token"] = [self.test_token] + request.requestHeaders.getRawHeaders = Mock(return_value=[""]) + d = self.auth.get_user_by_req(request) + self.failureResultOf(d, AuthError) + + def test_get_user_by_req_user_missing_token(self): + self.store.get_app_service_by_token = Mock(return_value=None) + user_info = { + "name": self.test_user, + "device_id": "nothing", + "token_id": "ditto", + "admin": False + } + self.store.get_user_by_token = Mock(return_value=user_info) + + request = Mock(args={}) + request.requestHeaders.getRawHeaders = Mock(return_value=[""]) + d = self.auth.get_user_by_req(request) + self.failureResultOf(d, AuthError) + + @defer.inlineCallbacks + def test_get_user_by_req_appservice_valid_token(self): + app_service = Mock(token="foobar", url="a_url", sender=self.test_user) + self.store.get_app_service_by_token = Mock(return_value=app_service) + self.store.get_user_by_token = Mock(return_value=None) + + request = Mock(args={}) + request.args["access_token"] = [self.test_token] + request.requestHeaders.getRawHeaders = Mock(return_value=[""]) + (user, info) = yield self.auth.get_user_by_req(request) + self.assertEquals(user.to_string(), self.test_user) + + def test_get_user_by_req_appservice_bad_token(self): + self.store.get_app_service_by_token = Mock(return_value=None) + self.store.get_user_by_token = Mock(return_value=None) + + request = Mock(args={}) + request.args["access_token"] = [self.test_token] + request.requestHeaders.getRawHeaders = Mock(return_value=[""]) + d = self.auth.get_user_by_req(request) + self.failureResultOf(d, AuthError) + + def test_get_user_by_req_appservice_missing_token(self): + app_service = Mock(token="foobar", url="a_url", sender=self.test_user) + self.store.get_app_service_by_token = Mock(return_value=app_service) + self.store.get_user_by_token = Mock(return_value=None) + + request = Mock(args={}) + request.requestHeaders.getRawHeaders = Mock(return_value=[""]) + d = self.auth.get_user_by_req(request) + self.failureResultOf(d, AuthError) + + @defer.inlineCallbacks + def test_get_user_by_req_appservice_valid_token_valid_user_id(self): + masquerading_user_id = "@doppelganger:matrix.org" + app_service = 
Mock(token="foobar", url="a_url", sender=self.test_user) + app_service.is_interested_in_user = Mock(return_value=True) + self.store.get_app_service_by_token = Mock(return_value=app_service) + self.store.get_user_by_token = Mock(return_value=None) + + request = Mock(args={}) + request.args["access_token"] = [self.test_token] + request.args["user_id"] = [masquerading_user_id] + request.requestHeaders.getRawHeaders = Mock(return_value=[""]) + (user, info) = yield self.auth.get_user_by_req(request) + self.assertEquals(user.to_string(), masquerading_user_id) + + def test_get_user_by_req_appservice_valid_token_bad_user_id(self): + masquerading_user_id = "@doppelganger:matrix.org" + app_service = Mock(token="foobar", url="a_url", sender=self.test_user) + app_service.is_interested_in_user = Mock(return_value=False) + self.store.get_app_service_by_token = Mock(return_value=app_service) + self.store.get_user_by_token = Mock(return_value=None) + + request = Mock(args={}) + request.args["access_token"] = [self.test_token] + request.args["user_id"] = [masquerading_user_id] + request.requestHeaders.getRawHeaders = Mock(return_value=[""]) + d = self.auth.get_user_by_req(request) + self.failureResultOf(d, AuthError) diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py index f849120a3e..e5d876d89a 100644 --- a/tests/rest/client/v1/test_presence.py +++ b/tests/rest/client/v1/test_presence.py @@ -65,6 +65,7 @@ class PresenceStateTestCase(unittest.TestCase): hs.handlers = JustPresenceHandlers(hs) self.datastore = hs.get_datastore() + self.datastore.get_app_service_by_token = Mock(return_value=None) def get_presence_list(*a, **kw): return defer.succeed([]) @@ -154,6 +155,7 @@ class PresenceListTestCase(unittest.TestCase): hs.handlers = JustPresenceHandlers(hs) self.datastore = hs.get_datastore() + self.datastore.get_app_service_by_token = Mock(return_value=None) def has_presence_state(user_localpart): return defer.succeed( @@ -303,6 +305,7 @@ class PresenceEventStreamTestCase(unittest.TestCase): hs.handlers.room_member_handler.get_rooms_for_user = get_rooms_for_user self.mock_datastore = hs.get_datastore() + self.mock_datastore.get_app_service_by_token = Mock(return_value=None) def get_profile_displayname(user_id): return defer.succeed("Frank") diff --git a/tests/rest/client/v2_alpha/__init__.py b/tests/rest/client/v2_alpha/__init__.py index fa70575c57..7c2b0dfa0e 100644 --- a/tests/rest/client/v2_alpha/__init__.py +++ b/tests/rest/client/v2_alpha/__init__.py @@ -59,6 +59,8 @@ class V2AlphaRestTestCase(unittest.TestCase): r.register_servlets(hs, self.mock_resource) def make_datastore_mock(self): - return Mock(spec=[ + store = Mock(spec=[ "insert_client_ip", ]) + store.get_app_service_by_token = Mock(return_value=None) + return store -- cgit 1.5.1 From 75656712e34694460ce7b12fc5a467667e04ea21 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 9 Feb 2015 14:22:52 +0000 Subject: Time how long we're spending on the database thread --- synapse/app/homeserver.py | 2 ++ synapse/storage/_base.py | 25 +++++++++++++++++++++++++ 2 files changed, 27 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 0f175ec3f4..8976ff2e82 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -274,6 +274,8 @@ def setup(): hs.get_pusherpool().start() + hs.get_datastore().start_profiling() + if config.daemonize: print config.pid_file daemon = Daemonize( diff --git a/synapse/storage/_base.py 
b/synapse/storage/_base.py index b350fd61f1..0849c5f1b4 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -85,6 +85,28 @@ class SQLBaseStore(object): self._db_pool = hs.get_db_pool() self._clock = hs.get_clock() + self._previous_txn_total_time = 0 + self._current_txn_total_time = 0 + self._previous_loop_ts = 0 + + def start_profiling(self): + self._previous_loop_ts = self._clock.time_msec() + + def loop(): + curr = self._current_txn_total_time + prev = self._previous_txn_total_time + self._previous_txn_total_time = curr + + time_now = self._clock.time_msec() + time_then = self._previous_loop_ts + self._previous_loop_ts = time_now + + ratio = (curr - prev)/(time_now - time_then) + + logger.info("Total database time: %.3f", ratio) + + self._clock.looping_call(loop, 1000) + @defer.inlineCallbacks def runInteraction(self, desc, func, *args, **kwargs): """Wraps the .runInteraction() method on the underlying db_pool.""" @@ -114,6 +136,9 @@ class SQLBaseStore(object): "[TXN END] {%s} %f", name, end - start ) + + self._current_txn_total_time += end - start + with PreserveLoggingContext(): result = yield self._db_pool.runInteraction( inner_func, *args, **kwargs -- cgit 1.5.1 From 66fde49f071d75ea8bfdfac02fd4fa6fab5a9bf4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 9 Feb 2015 14:45:15 +0000 Subject: Log database time every 10s and log as percentage --- synapse/storage/_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 0849c5f1b4..f1df5d39fd 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -103,9 +103,9 @@ class SQLBaseStore(object): ratio = (curr - prev)/(time_now - time_then) - logger.info("Total database time: %.3f", ratio) + logger.info("Total database time: %.3f%", ratio * 100) - self._clock.looping_call(loop, 1000) + self._clock.looping_call(loop, 10000) @defer.inlineCallbacks def runInteraction(self, desc, func, *args, **kwargs): -- cgit 1.5.1 From c4ee4ce93ec6075bc076b12520fd72769079f37c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 9 Feb 2015 15:00:37 +0000 Subject: Fix typo --- synapse/storage/_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index f1df5d39fd..310ee0104c 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -103,7 +103,7 @@ class SQLBaseStore(object): ratio = (curr - prev)/(time_now - time_then) - logger.info("Total database time: %.3f%", ratio * 100) + logger.info("Total database time: %.3f%%", ratio * 100) self._clock.looping_call(loop, 10000) -- cgit 1.5.1 From 3a5ad7dbd5a375023c96ee65c901f8be5ab02341 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 9 Feb 2015 17:55:56 +0000 Subject: Performance counters for database transaction names --- synapse/storage/_base.py | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 310ee0104c..bcb03cbdcb 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -39,6 +39,7 @@ class LoggingTransaction(object): passed to the constructor. 
Adds logging to the .execute() method.""" __slots__ = ["txn", "name"] + def __init__(self, txn, name): object.__setattr__(self, "txn", txn) object.__setattr__(self, "name", name) @@ -88,6 +89,8 @@ class SQLBaseStore(object): self._previous_txn_total_time = 0 self._current_txn_total_time = 0 self._previous_loop_ts = 0 + self._txn_perf_counters = {} + self._previous_txn_perf_counters = {} def start_profiling(self): self._previous_loop_ts = self._clock.time_msec() @@ -103,7 +106,29 @@ class SQLBaseStore(object): ratio = (curr - prev)/(time_now - time_then) - logger.info("Total database time: %.3f%%", ratio * 100) + txn_counters = [] + for name, (count, cum_time) in self._txn_perf_counters.items(): + prev_count, prev_time = self._previous_txn_perf_counters.get( + name, (0,0) + ) + txn_counters.append(( + (cum_time - prev_time) / (time_now - time_then), + count - prev_count, + name + )) + + self._previous_txn_perf_counters = dict(self._txn_perf_counters) + + txn_counters.sort(reverse=True) + top_three_counters = ", ".join( + "%s(%d): %.3f%%" % (name, count, 100 * ratio) + for ratio, count, name in txn_counters[:3] + ) + + logger.info( + "Total database time: %.3f%% {%s}", + ratio * 100, top_three_counters + ) self._clock.looping_call(loop, 10000) @@ -139,6 +164,11 @@ class SQLBaseStore(object): self._current_txn_total_time += end - start + count, cum_time = self._txn_perf_counters.get(name, (0,0)) + count += 1 + cum_time += end - start + self._txn_perf_counters[name] = (count, cum_time) + with PreserveLoggingContext(): result = yield self._db_pool.runInteraction( inner_func, *args, **kwargs -- cgit 1.5.1 From 347b497db0355fe4e26ae3a51967aa91bec090d3 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 9 Feb 2015 17:57:09 +0000 Subject: Formatting --- synapse/storage/_base.py | 1 - 1 file changed, 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index bcb03cbdcb..45f4b994eb 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -39,7 +39,6 @@ class LoggingTransaction(object): passed to the constructor. Adds logging to the .execute() method.""" __slots__ = ["txn", "name"] - def __init__(self, txn, name): object.__setattr__(self, "txn", txn) object.__setattr__(self, "name", name) -- cgit 1.5.1 From 0c4536da8fe75a207052fb558414b4408aa857ec Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 9 Feb 2015 18:06:31 +0000 Subject: Use the transaction 'desc' rather than 'name', increment the txn_ids in txn names --- synapse/storage/_base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 45f4b994eb..5ddd410607 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -140,7 +140,7 @@ class SQLBaseStore(object): with LoggingContext("runInteraction") as context: current_context.copy_to(context) start = time.time() * 1000 - txn_id = SQLBaseStore._TXN_ID + txn_id = self._TXN_ID # We don't really need these to be unique, so lets stop it from # growing really large. 
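Taken together, the profiling commits above boil down to a small amount of bookkeeping: accumulate a (count, cumulative time) pair per transaction description, and on each reporting tick log the delta since the last tick as a share of the interval. A standalone sketch of that pattern, simplified from the diffs (the names and timings here are illustrative, not code from the patches):

    import time

    txn_counters = {}    # transaction desc -> (count, cumulative_ms)
    prev_snapshot = {}   # counters as of the previous report

    def record_txn(desc, start_ms, end_ms):
        # Called as each database transaction finishes.
        count, cum_ms = txn_counters.get(desc, (0, 0))
        txn_counters[desc] = (count + 1, cum_ms + (end_ms - start_ms))

    def report(interval_ms, limit=3):
        # Fraction of the interval each transaction consumed since the
        # last report, biggest first -- the "top three" line in the log.
        deltas = []
        for desc, (count, cum_ms) in txn_counters.items():
            prev_count, prev_ms = prev_snapshot.get(desc, (0, 0))
            deltas.append(
                ((cum_ms - prev_ms) / interval_ms, count - prev_count, desc)
            )
        prev_snapshot.update(txn_counters)
        deltas.sort(reverse=True)
        return ", ".join(
            "%s(%d): %.3f%%" % (desc, count, 100 * ratio)
            for ratio, count, desc in deltas[:limit]
        )

    start = time.time() * 1000
    record_txn("persist_event", start, start + 12.5)   # made-up duration
    print(report(10000.0))
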
@@ -163,10 +163,10 @@ class SQLBaseStore(object): self._current_txn_total_time += end - start - count, cum_time = self._txn_perf_counters.get(name, (0,0)) + count, cum_time = self._txn_perf_counters.get(desc, (0,0)) count += 1 cum_time += end - start - self._txn_perf_counters[name] = (count, cum_time) + self._txn_perf_counters[desc] = (count, cum_time) with PreserveLoggingContext(): result = yield self._db_pool.runInteraction( -- cgit 1.5.1 From d7c7efb691bd726ec3e8879e289546fbcfd7dabd Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 10 Feb 2015 14:50:53 +0000 Subject: Add performance counters for different stages of loading events --- synapse/storage/_base.py | 84 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 60 insertions(+), 24 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 5ddd410607..c79399fe5e 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -77,6 +77,43 @@ class LoggingTransaction(object): sql_logger.debug("[SQL time] {%s} %f", self.name, end - start) +class PerformanceCounters(object): + def __init__(self): + self.current_counters = {} + self.previous_counters = {} + + def update(self, key, start_time, end_time=None): + if end_time is None: + end_time = time.time() * 1000; + duration = end_time - start_time + count, cum_time = self.current_counters.get(key, (0, 0)) + count += 1 + cum_time += duration + self.current_counters[key] = (count, cum_time) + return end_time + + def interval(self, interval_duration, limit=3): + counters = [] + for name, (count, cum_time) in self.current_counters.items(): + prev_count, prev_time = self.previous_counters.get(name, (0, 0)) + counters.append(( + (cum_time - prev_time) / interval_duration, + count - prev_count, + name + )) + + self.previous_counters = dict(self.current_counters) + + counters.sort(reverse=True) + + top_n_counters = ", ".join( + "%s(%d): %.3f%%" % (name, count, 100 * ratio) + for ratio, count, name in txn_counters[:limit] + ) + + return top_n_counters + + class SQLBaseStore(object): _TXN_ID = 0 @@ -88,8 +125,8 @@ class SQLBaseStore(object): self._previous_txn_total_time = 0 self._current_txn_total_time = 0 self._previous_loop_ts = 0 - self._txn_perf_counters = {} - self._previous_txn_perf_counters = {} + self._txn_perf_counters = PerformanceCounters() + self._get_event_counters = PerformanceCounters() def start_profiling(self): self._previous_loop_ts = self._clock.time_msec() @@ -105,23 +142,12 @@ class SQLBaseStore(object): ratio = (curr - prev)/(time_now - time_then) - txn_counters = [] - for name, (count, cum_time) in self._txn_perf_counters.items(): - prev_count, prev_time = self._previous_txn_perf_counters.get( - name, (0,0) - ) - txn_counters.append(( - (cum_time - prev_time) / (time_now - time_then), - count - prev_count, - name - )) - - self._previous_txn_perf_counters = dict(self._txn_perf_counters) - - txn_counters.sort(reverse=True) - top_three_counters = ", ".join( - "%s(%d): %.3f%%" % (name, count, 100 * ratio) - for ratio, count, name in txn_counters[:3] + top_three_counters = self._txn_perf_counters.interval( + time_now - time_then, limit=3 + ) + + top_3_event_counters = self._get_event_counters.interval( + time_now - time_then, limit=3 ) logger.info( @@ -162,11 +188,7 @@ class SQLBaseStore(object): ) self._current_txn_total_time += end - start - - count, cum_time = self._txn_perf_counters.get(desc, (0,0)) - count += 1 - cum_time += end - start - self._txn_perf_counters[desc] = (count, cum_time) + 
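The PerformanceCounters class added in the lines that follow generalizes that bookkeeping. Its update() returns the end timestamp, so a call site can time consecutive stages by threading a single variable through the calls, which is how the event-loading path later in this commit uses it. A rough usage sketch, assuming the class from this hunk is in scope (the stage names are the ones the later hunks register):

    import time

    counters = PerformanceCounters()

    start_time = time.time() * 1000
    # ... decode the event's JSON ...
    start_time = counters.update("decode_json", start_time)
    # ... construct the FrozenEvent ...
    start_time = counters.update("build_frozen_event", start_time)

    # On each 10s tick, render the top consumers as a share of the interval:
    summary = counters.interval(10000, limit=3)
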
self._txn_perf_counters.update(desc, start, end) with PreserveLoggingContext(): result = yield self._db_pool.runInteraction( @@ -566,6 +588,8 @@ class SQLBaseStore(object): "LIMIT 1 " ) + start_time = time.time() * 1000; + txn.execute(sql, (event_id,)) res = txn.fetchone() @@ -575,6 +599,8 @@ class SQLBaseStore(object): internal_metadata, js, redacted, rejected_reason = res + self._get_event_counters.update("select_event", start_time) + if allow_rejected or not rejected_reason: return self._get_event_from_row_txn( txn, internal_metadata, js, redacted, @@ -586,10 +612,18 @@ class SQLBaseStore(object): def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted, check_redacted=True, get_prev_content=False): + + start_time = time.time() * 1000; + update_counter = self._get_event_counters.update + d = json.loads(js) + start_time = update_counter("decode_json", start_time) + internal_metadata = json.loads(internal_metadata) + start_time = update_counter("decode_internal", start_time) ev = FrozenEvent(d, internal_metadata_dict=internal_metadata) + start_time = update_counter("build_frozen_event", start_time) if check_redacted and redacted: ev = prune_event(ev) @@ -605,6 +639,7 @@ class SQLBaseStore(object): if because: ev.unsigned["redacted_because"] = because + start_time = update_counter("redact_event", start_time) if get_prev_content and "replaces_state" in ev.unsigned: prev = self._get_event_txn( @@ -614,6 +649,7 @@ class SQLBaseStore(object): ) if prev: ev.unsigned["prev_content"] = prev.get_dict()["content"] + start_time = update_counter("get_prev_content", start_time) return ev -- cgit 1.5.1 From fda4422bc9d9f2974d7185011d6d905eea372b09 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 10 Feb 2015 14:54:07 +0000 Subject: Fix pyflakes --- synapse/storage/_base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index c79399fe5e..36455ef93c 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -108,7 +108,7 @@ class PerformanceCounters(object): top_n_counters = ", ".join( "%s(%d): %.3f%%" % (name, count, 100 * ratio) - for ratio, count, name in txn_counters[:limit] + for ratio, count, name in counters[:limit] ) return top_n_counters @@ -151,8 +151,8 @@ class SQLBaseStore(object): ) logger.info( - "Total database time: %.3f%% {%s}", - ratio * 100, top_three_counters + "Total database time: %.3f%% {%s} {%s}", + ratio * 100, top_three_counters, top_3_event_counters ) self._clock.looping_call(loop, 10000) -- cgit 1.5.1 From b085fac7353e1cd395b89f9334c8273a8e996f48 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 10 Feb 2015 16:30:48 +0000 Subject: Code-style fixes --- synapse/handlers/presence.py | 4 +++- synapse/handlers/register.py | 13 ++++++++----- synapse/push/__init__.py | 8 +++++--- synapse/push/baserules.py | 11 ++++++----- synapse/push/rulekinds.py | 12 ++++++------ synapse/python_dependencies.py | 6 ++++-- synapse/rest/client/v1/push_rule.py | 18 +++++++++++++----- synapse/rest/client/v1/pusher.py | 4 ++-- synapse/rest/media/v1/upload_resource.py | 7 ++++--- synapse/storage/_base.py | 6 +++--- synapse/storage/push_rule.py | 4 +++- 11 files changed, 57 insertions(+), 36 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index cd0798c2b0..6a266ee0fe 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -658,7 +658,9 @@ class PresenceHandler(BaseHandler): 
observers = set(self._remote_recvmap.get(user, set())) if observers: - logger.debug(" | %d interested local observers %r", len(observers), observers) + logger.debug( + " | %d interested local observers %r", len(observers), observers + ) rm_handler = self.homeserver.get_handlers().room_member_handler room_ids = yield rm_handler.get_rooms_for_user(user) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 4f06c487b1..0247327eb9 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -105,17 +105,20 @@ class RegistrationHandler(BaseHandler): # do it here. try: auth_user = UserID.from_string(user_id) - identicon_resource = self.hs.get_resource_for_media_repository().getChildWithDefault("identicon", None) - upload_resource = self.hs.get_resource_for_media_repository().getChildWithDefault("upload", None) + media_repository = self.hs.get_resource_for_media_repository() + identicon_resource = media_repository.getChildWithDefault("identicon", None) + upload_resource = media_repository.getChildWithDefault("upload", None) identicon_bytes = identicon_resource.generate_identicon(user_id, 320, 320) content_uri = yield upload_resource.create_content( "image/png", None, identicon_bytes, len(identicon_bytes), auth_user ) profile_handler = self.hs.get_handlers().profile_handler - profile_handler.set_avatar_url(auth_user, auth_user, ("%s#auto" % content_uri)) + profile_handler.set_avatar_url( + auth_user, auth_user, ("%s#auto" % (content_uri,)) + ) except NotImplementedError: - pass # make tests pass without messing around creating default avatars - + pass # make tests pass without messing around creating default avatars + defer.returnValue((user_id, token)) @defer.inlineCallbacks diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 6f143a5df9..418a348a58 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -140,7 +140,7 @@ class Pusher(object): lambda x: ('[%s%s]' % (x.group(1) and '^' or '', re.sub(r'\\\-', '-', x.group(2)))), r) return r - + def _event_fulfills_condition(self, ev, condition, display_name, room_member_count): if condition['kind'] == 'event_match': if 'pattern' not in condition: @@ -170,8 +170,10 @@ class Pusher(object): return False if not display_name: return False - return re.search("\b%s\b" % re.escape(display_name), - ev['content']['body'], flags=re.IGNORECASE) is not None + return re.search( + "\b%s\b" % re.escape(display_name), ev['content']['body'], + flags=re.IGNORECASE + ) is not None elif condition['kind'] == 'room_member_count': if 'is' not in condition: diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 37878f1e0b..162d265f66 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -1,5 +1,6 @@ from synapse.push.rulekinds import PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP + def list_with_base_rules(rawrules, user_name): ruleslist = [] @@ -9,9 +10,9 @@ def list_with_base_rules(rawrules, user_name): if r['priority_class'] < current_prio_class: while r['priority_class'] < current_prio_class: ruleslist.extend(make_base_rules( - user_name, - PRIORITY_CLASS_INVERSE_MAP[current_prio_class]) - ) + user_name, + PRIORITY_CLASS_INVERSE_MAP[current_prio_class] + )) current_prio_class -= 1 ruleslist.append(r) @@ -19,8 +20,8 @@ def list_with_base_rules(rawrules, user_name): while current_prio_class > 0: ruleslist.extend(make_base_rules( user_name, - PRIORITY_CLASS_INVERSE_MAP[current_prio_class]) - ) + PRIORITY_CLASS_INVERSE_MAP[current_prio_class] + )) 
current_prio_class -= 1 return ruleslist diff --git a/synapse/push/rulekinds.py b/synapse/push/rulekinds.py index 763bdee58e..660aa4e10e 100644 --- a/synapse/push/rulekinds.py +++ b/synapse/push/rulekinds.py @@ -1,8 +1,8 @@ PRIORITY_CLASS_MAP = { - 'underride': 1, - 'sender': 2, - 'room': 3, - 'content': 4, - 'override': 5, - } + 'underride': 1, + 'sender': 2, + 'room': 3, + 'content': 4, + 'override': 5, +} PRIORITY_CLASS_INVERSE_MAP = {v: k for k, v in PRIORITY_CLASS_MAP.items()} diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index a89d618606..fd68da9dfb 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -19,10 +19,11 @@ REQUIREMENTS = { "pydenticon": ["pydenticon"], } + def github_link(project, version, egg): return "https://github.com/%s/tarball/%s/#egg=%s" % (project, version, egg) -DEPENDENCY_LINKS=[ +DEPENDENCY_LINKS = [ github_link( project="matrix-org/syutil", version="v0.0.2", @@ -101,6 +102,7 @@ def check_requirements(): % (dependency, file_path, version, required_version) ) + def list_requirements(): result = [] linked = [] @@ -111,7 +113,7 @@ def list_requirements(): for requirement in REQUIREMENTS: is_linked = False for link in linked: - if requirement.replace('-','_').startswith(link): + if requirement.replace('-', '_').startswith(link): is_linked = True if not is_linked: result.append(requirement) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index d43ade39dd..c4e7dfcf0e 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -15,12 +15,17 @@ from twisted.internet import defer -from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError, NotFoundError, \ - StoreError +from synapse.api.errors import ( + SynapseError, Codes, UnrecognizedRequestError, NotFoundError, StoreError +) from .base import ClientV1RestServlet, client_path_pattern -from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException +from synapse.storage.push_rule import ( + InconsistentRuleException, RuleNotFoundException +) import synapse.push.baserules as baserules -from synapse.push.rulekinds import PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP +from synapse.push.rulekinds import ( + PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP +) import json @@ -105,7 +110,9 @@ class PushRuleRestServlet(ClientV1RestServlet): # we build up the full structure and then decide which bits of it # to send which means doing unnecessary work sometimes but is # is probably not going to make a whole lot of difference - rawrules = yield self.hs.get_datastore().get_push_rules_for_user_name(user.to_string()) + rawrules = yield self.hs.get_datastore().get_push_rules_for_user_name( + user.to_string() + ) for r in rawrules: r["conditions"] = json.loads(r["conditions"]) @@ -383,6 +390,7 @@ def _namespaced_rule_id_from_spec(spec): def _rule_id_from_namespaced(in_rule_id): return in_rule_id.split('/')[-1] + class InvalidRuleException(Exception): pass diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index e10d2576d2..80e9939b79 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -34,8 +34,8 @@ class PusherRestServlet(ClientV1RestServlet): pusher_pool = self.hs.get_pusherpool() if ('pushkey' in content and 'app_id' in content - and 'kind' in content and - content['kind'] is None): + and 'kind' in content and + content['kind'] is None): yield pusher_pool.remove_pusher( content['app_id'], 
content['pushkey'] ) diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 5b42782331..6df52ca434 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -38,9 +38,10 @@ class UploadResource(BaseMediaResource): def render_OPTIONS(self, request): respond_with_json(request, 200, {}, send_cors=True) return NOT_DONE_YET - + @defer.inlineCallbacks - def create_content(self, media_type, upload_name, content, content_length, auth_user): + def create_content(self, media_type, upload_name, content, content_length, + auth_user): media_id = random_string(24) fname = self.filepaths.local_media_filepath(media_id) @@ -65,7 +66,7 @@ class UploadResource(BaseMediaResource): } yield self._generate_local_thumbnails(media_id, media_info) - + defer.returnValue("mxc://%s/%s" % (self.server_name, media_id)) @defer.inlineCallbacks diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 36455ef93c..3e1ab0a159 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -84,7 +84,7 @@ class PerformanceCounters(object): def update(self, key, start_time, end_time=None): if end_time is None: - end_time = time.time() * 1000; + end_time = time.time() * 1000 duration = end_time - start_time count, cum_time = self.current_counters.get(key, (0, 0)) count += 1 @@ -588,7 +588,7 @@ class SQLBaseStore(object): "LIMIT 1 " ) - start_time = time.time() * 1000; + start_time = time.time() * 1000 txn.execute(sql, (event_id,)) @@ -613,7 +613,7 @@ class SQLBaseStore(object): def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted, check_redacted=True, get_prev_content=False): - start_time = time.time() * 1000; + start_time = time.time() * 1000 update_counter = self._get_event_counters.update d = json.loads(js) diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 30e23445d9..620de71398 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -91,7 +91,9 @@ class PushRuleStore(SQLBaseStore): txn.execute(sql, (user_name, relative_to_rule)) res = txn.fetchall() if not res: - raise RuleNotFoundException("before/after rule not found: %s" % (relative_to_rule)) + raise RuleNotFoundException( + "before/after rule not found: %s" % (relative_to_rule,) + ) priority_class, base_rule_priority = res[0] if 'priority_class' in kwargs and kwargs['priority_class'] != priority_class: -- cgit 1.5.1 From fd40d992adfb8b63f6e925dad030c63498501408 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Wed, 11 Feb 2015 10:41:33 +0000 Subject: PEP8-ify --- synapse/appservice/api.py | 2 -- synapse/handlers/directory.py | 3 --- synapse/rest/appservice/v1/register.py | 2 +- synapse/storage/appservice.py | 3 --- 4 files changed, 1 insertion(+), 9 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 15ac1e27fc..6192813c03 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -71,7 +71,6 @@ class ApplicationServiceApi(SimpleHttpClient): logger.warning("query_alias to %s threw exception %s", uri, ex) defer.returnValue(False) - @defer.inlineCallbacks def push_bulk(self, service, events): events = self._serialize(events) @@ -107,4 +106,3 @@ class ApplicationServiceApi(SimpleHttpClient): return [ serialize_event(e, time_now, as_client_event=True) for e in events ] - diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 87bc12c983..20ab9e269c 100644 --- 
a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -58,7 +58,6 @@ class DirectoryHandler(BaseHandler): servers ) - @defer.inlineCallbacks def create_association(self, user_id, room_alias, room_id, servers=None): # association creation for human users @@ -75,7 +74,6 @@ class DirectoryHandler(BaseHandler): ) yield self._create_association(room_alias, room_id, servers) - @defer.inlineCallbacks def create_appservice_association(self, service, room_alias, room_id, servers=None): @@ -127,7 +125,6 @@ class DirectoryHandler(BaseHandler): # if room_id: # yield self._update_room_alias_events(user_id, room_id) - @defer.inlineCallbacks def get_association(self, room_alias): room_id = None diff --git a/synapse/rest/appservice/v1/register.py b/synapse/rest/appservice/v1/register.py index d3d5aef220..3bd0c1220c 100644 --- a/synapse/rest/appservice/v1/register.py +++ b/synapse/rest/appservice/v1/register.py @@ -65,7 +65,7 @@ class RegisterRestServlet(AppServiceRestServlet): hs_token = app_service.hs_token defer.returnValue((200, { - "hs_token": hs_token + "hs_token": hs_token })) def _parse_namespace(self, target_ns, origin_ns, ns): diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index ba31c68595..d941b1f387 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -97,7 +97,6 @@ class ApplicationServiceStore(SQLBaseStore): # allocate new ASes. It relies on the server admin inserting the AS # token into the database manually. - if not service.token or not service.url: raise StoreError(400, "Token and url must be specified.") @@ -186,7 +185,6 @@ class ApplicationServiceStore(SQLBaseStore): # TODO: The from_cache=False impl # TODO: This should be JOINed with the application_services_regex table. - @defer.inlineCallbacks def _populate_cache(self): """Populates the ApplicationServiceCache from the database.""" @@ -244,4 +242,3 @@ class ApplicationServiceStore(SQLBaseStore): hs_token=service["hs_token"], sender=service["sender"] )) - -- cgit 1.5.1 From 4ebbaf0d4382813ba896f3e8101de12e112cbed5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Feb 2015 14:23:10 +0000 Subject: Blunty replace json with simplejson --- synapse/crypto/keyclient.py | 2 +- synapse/federation/persistence.py | 2 +- synapse/federation/transport/server.py | 2 +- synapse/http/client.py | 2 +- synapse/http/matrixfederationclient.py | 2 +- synapse/push/__init__.py | 2 +- synapse/push/pusherpool.py | 2 +- synapse/rest/client/v1/directory.py | 2 +- synapse/rest/client/v1/login.py | 2 +- synapse/rest/client/v1/presence.py | 2 +- synapse/rest/client/v1/profile.py | 2 +- synapse/rest/client/v1/push_rule.py | 2 +- synapse/rest/client/v1/pusher.py | 2 +- synapse/rest/client/v1/register.py | 2 +- synapse/rest/client/v1/room.py | 2 +- synapse/rest/client/v2_alpha/filter.py | 2 +- synapse/rest/media/v0/content_repository.py | 2 +- synapse/storage/__init__.py | 2 +- synapse/storage/_base.py | 2 +- synapse/storage/filtering.py | 2 +- synapse/storage/push_rule.py | 2 +- 21 files changed, 21 insertions(+), 21 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index cd12349f67..74008347c3 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -19,7 +19,7 @@ from twisted.internet.protocol import Factory from twisted.internet import defer, reactor from synapse.http.endpoint import matrix_federation_endpoint from synapse.util.logcontext import PreserveLoggingContext -import json +import 
simplejson as json import logging diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index 85c82a4623..8a1afc0ca5 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -23,7 +23,7 @@ from twisted.internet import defer from synapse.util.logutils import log_function -import json +import simplejson as json import logging diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 9c9f8d525b..2ffb37aa18 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -20,7 +20,7 @@ from synapse.api.errors import Codes, SynapseError from synapse.util.logutils import log_function import logging -import json +import simplejson as json import re diff --git a/synapse/http/client.py b/synapse/http/client.py index 198f575cfa..d500e19c81 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -23,7 +23,7 @@ from twisted.web.http_headers import Headers from StringIO import StringIO -import json +import simplejson as json import logging import urllib diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 056d446e42..406203acf2 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -33,7 +33,7 @@ from synapse.api.errors import ( from syutil.crypto.jsonsign import sign_json -import json +import simplejson as json import logging import urllib import urlparse diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 418a348a58..0659a1cb9b 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -22,7 +22,7 @@ import synapse.util.async import baserules import logging -import json +import simplejson as json import re logger = logging.getLogger(__name__) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 5a525befd7..7483d257bf 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -20,7 +20,7 @@ from httppusher import HttpPusher from synapse.push import PusherConfigException import logging -import json +import simplejson as json logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 8ed7e2d669..420aa89f38 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -20,7 +20,7 @@ from synapse.api.errors import AuthError, SynapseError, Codes from synapse.types import RoomAlias from .base import ClientV1RestServlet, client_path_pattern -import json +import simplejson as json import logging diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 7116ac98e8..b2257b749d 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -19,7 +19,7 @@ from synapse.api.errors import SynapseError from synapse.types import UserID from base import ClientV1RestServlet, client_path_pattern -import json +import simplejson as json class LoginRestServlet(ClientV1RestServlet): diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 7feb4aadb1..78d4f2b128 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -21,7 +21,7 @@ from synapse.api.errors import SynapseError from synapse.types import UserID from .base import ClientV1RestServlet, client_path_pattern -import json +import simplejson as json import logging logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v1/profile.py 
b/synapse/rest/client/v1/profile.py index 15d6f3fc6c..1e77eb49cf 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -19,7 +19,7 @@ from twisted.internet import defer from .base import ClientV1RestServlet, client_path_pattern from synapse.types import UserID -import json +import simplejson as json class ProfileDisplaynameRestServlet(ClientV1RestServlet): diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index c4e7dfcf0e..b012f31084 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -27,7 +27,7 @@ from synapse.push.rulekinds import ( PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP ) -import json +import simplejson as json class PushRuleRestServlet(ClientV1RestServlet): diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 80e9939b79..6045e86f34 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -19,7 +19,7 @@ from synapse.api.errors import SynapseError, Codes from synapse.push import PusherConfigException from .base import ClientV1RestServlet, client_path_pattern -import json +import simplejson as json class PusherRestServlet(ClientV1RestServlet): diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index c0423c2d45..d3399c446b 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -25,7 +25,7 @@ from synapse.util.async import run_on_reactor from hashlib import sha1 import hmac -import json +import simplejson as json import logging import urllib diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 410f19ccf6..0346afb1b4 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -23,7 +23,7 @@ from synapse.api.constants import EventTypes, Membership from synapse.types import UserID, RoomID, RoomAlias from synapse.events.utils import serialize_event -import json +import simplejson as json import logging import urllib diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index 6ddc495d23..703250cea8 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -21,7 +21,7 @@ from synapse.types import UserID from ._base import client_v2_pattern -import json +import simplejson as json import logging diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py index 22e26e3cd5..e77a20fb2e 100644 --- a/synapse/rest/media/v0/content_repository.py +++ b/synapse/rest/media/v0/content_repository.py @@ -25,7 +25,7 @@ from twisted.web import server, resource from twisted.internet import defer import base64 -import json +import simplejson as json import logging import os import re diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index a63c59a8a2..924ea89035 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -44,7 +44,7 @@ from syutil.jsonutil import encode_canonical_json from synapse.crypto.event_signing import compute_event_reference_hash -import json +import simplejson as json import logging import os diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 3e1ab0a159..b74e74ac91 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -23,7 +23,7 @@ from synapse.util.logcontext import PreserveLoggingContext, LoggingContext from twisted.internet import defer import collections -import json +import 
simplejson as json import sys import time diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index e86eeced45..457a11fd02 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -17,7 +17,7 @@ from twisted.internet import defer from ._base import SQLBaseStore -import json +import simplejson as json class FilteringStore(SQLBaseStore): diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 620de71398..ae46b39cc1 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -20,7 +20,7 @@ from twisted.internet import defer import logging import copy -import json +import simplejson as json logger = logging.getLogger(__name__) -- cgit 1.5.1 From f5a70e0d2e890adea53b3f6565a3bbe92512a506 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 11 Feb 2015 15:01:15 +0000 Subject: Add a cache for get_event --- synapse/config/_base.py | 10 ++++++++++ synapse/config/database.py | 5 +++++ synapse/storage/__init__.py | 3 +++ synapse/storage/_base.py | 24 +++++++++++++++++++++--- tests/storage/test_base.py | 5 +++-- tests/utils.py | 1 + 6 files changed, 43 insertions(+), 5 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 9b0f8c3c32..87cdbf1d30 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -27,6 +27,16 @@ class Config(object): def __init__(self, args): pass + @staticmethod + def parse_size(string): + sizes = {"K": 1024, "M": 1024 * 1024} + size = 1 + suffix = string[-1] + if suffix in sizes: + string = string[:-1] + size = sizes[suffix] + return int(string) * size + @staticmethod def abspath(file_path): return os.path.abspath(file_path) if file_path else file_path diff --git a/synapse/config/database.py b/synapse/config/database.py index daa161c952..87efe54645 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -24,6 +24,7 @@ class DatabaseConfig(Config): self.database_path = ":memory:" else: self.database_path = self.abspath(args.database_path) + self.event_cache_size = self.parse_size(args.event_cache_size) @classmethod def add_arguments(cls, parser): @@ -33,6 +34,10 @@ class DatabaseConfig(Config): "-d", "--database-path", default="homeserver.db", help="The database name." ) + db_group.add_argument( + "--event-cache-size", default="100K", + help="Number of events to cache in memory." 
+ ) @classmethod def generate_config(cls, args, config_dir_path): diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index a63c59a8a2..1170d8b6ec 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -164,6 +164,9 @@ class DataStore(RoomMemberStore, RoomStore, stream_ordering=None, is_new_state=True, current_state=None): + # Remove the any existing cache entries for the event_id + self._get_event_cache.pop(event.event_id) + # We purposefully do this first since if we include a `current_state` # key, we *want* to update the `current_state_events` table if current_state: diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 3e1ab0a159..f13b8f4fad 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -19,6 +19,7 @@ from synapse.events import FrozenEvent from synapse.events.utils import prune_event from synapse.util.logutils import log_function from synapse.util.logcontext import PreserveLoggingContext, LoggingContext +from synapse.util.lrucache import LruCache from twisted.internet import defer @@ -128,6 +129,8 @@ class SQLBaseStore(object): self._txn_perf_counters = PerformanceCounters() self._get_event_counters = PerformanceCounters() + self._get_event_cache = LruCache(hs.config.event_cache_size) + def start_profiling(self): self._previous_loop_ts = self._clock.time_msec() @@ -579,6 +582,20 @@ class SQLBaseStore(object): def _get_event_txn(self, txn, event_id, check_redacted=True, get_prev_content=False, allow_rejected=False): + + start_time = time.time() * 1000 + update_counter = self._get_event_counters.update + + try: + cache = self._get_event_cache.setdefault(event_id, {}) + # Separate cache entries for each way to invoke _get_event_txn + return cache[(check_redacted, get_prev_content, allow_rejected)] + except KeyError: + pass + finally: + start_time = update_counter("event_cache", start_time) + + sql = ( "SELECT e.internal_metadata, e.json, r.event_id, rej.reason " "FROM event_json as e " @@ -588,7 +605,6 @@ class SQLBaseStore(object): "LIMIT 1 " ) - start_time = time.time() * 1000 txn.execute(sql, (event_id,)) @@ -599,14 +615,16 @@ class SQLBaseStore(object): internal_metadata, js, redacted, rejected_reason = res - self._get_event_counters.update("select_event", start_time) + start_time = update_counter("select_event", start_time) if allow_rejected or not rejected_reason: - return self._get_event_from_row_txn( + result = self._get_event_from_row_txn( txn, internal_metadata, js, redacted, check_redacted=check_redacted, get_prev_content=get_prev_content, ) + cache[(check_redacted, get_prev_content, allow_rejected)] = result + return result else: return None diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index a0a24ce096..55fbffa7a2 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -38,8 +38,9 @@ class SQLBaseStoreTestCase(unittest.TestCase): return defer.succeed(func(self.mock_txn, *args, **kwargs)) self.db_pool.runInteraction = runInteraction - hs = HomeServer("test", - db_pool=self.db_pool) + config = Mock() + config.event_cache_size = 1 + hs = HomeServer("test", db_pool=self.db_pool, config=config) self.datastore = SQLBaseStore(hs) diff --git a/tests/utils.py b/tests/utils.py index 25c33492a5..39895c739f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -41,6 +41,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): if config is None: config = Mock() config.signing_key = [MockKey()] + config.event_cache_size = 1 if datastore is 
None: db_pool = SQLiteMemoryDbPool() -- cgit 1.5.1 From aff892ce79b9cd5060d268eefb5876fc7caf30f0 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 11 Feb 2015 15:02:35 +0000 Subject: Fix formatting --- synapse/storage/_base.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index f13b8f4fad..29fc334f45 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -595,7 +595,6 @@ class SQLBaseStore(object): finally: start_time = update_counter("event_cache", start_time) - sql = ( "SELECT e.internal_metadata, e.json, r.event_id, rej.reason " "FROM event_json as e " @@ -605,7 +604,6 @@ class SQLBaseStore(object): "LIMIT 1 " ) - txn.execute(sql, (event_id,)) res = txn.fetchone() -- cgit 1.5.1 From ddc25cf4e2df10e2b175e70a2371a7e24081572f Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 11 Feb 2015 15:23:28 +0000 Subject: Invalidate the cache for an event if it is redacted --- synapse/storage/__init__.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 1170d8b6ec..a33e2298f6 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -423,6 +423,8 @@ class DataStore(RoomMemberStore, RoomStore, ) def _store_redaction(self, txn, event): + # invalidate the cache for the redacted event + self._get_event_cache.pop(event.redacts) txn.execute( "INSERT OR IGNORE INTO redactions " "(event_id, redacts) VALUES (?,?)", -- cgit 1.5.1 From ddb816cf60ca1b0c0f9bfab5df233a010ac309a3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Feb 2015 15:44:28 +0000 Subject: Don't unfreeze when using FreezeEvent.get_dict, as we are using a JSONEncoder that understands FrozenDict --- synapse/events/__init__.py | 4 ---- synapse/federation/persistence.py | 7 ++++--- synapse/handlers/federation.py | 15 ++++++++++++++- synapse/handlers/room.py | 4 +++- synapse/storage/__init__.py | 11 +++++++++-- synapse/util/frozenutils.py | 8 ++++++-- 6 files changed, 36 insertions(+), 13 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 8f0c6e959f..f4ec8cd18c 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -140,10 +140,6 @@ class FrozenEvent(EventBase): return e - def get_dict(self): - # We need to unfreeze what we return - return unfreeze(super(FrozenEvent, self).get_dict()) - def __str__(self): return self.__repr__() diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index 8a1afc0ca5..76a9dcd777 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -23,7 +23,8 @@ from twisted.internet import defer from synapse.util.logutils import log_function -import simplejson as json +from syutil.jsonutil import encode_canonical_json + import logging @@ -70,7 +71,7 @@ class TransactionActions(object): transaction.transaction_id, transaction.origin, code, - json.dumps(response) + encode_canonical_json(response) ) @defer.inlineCallbacks @@ -100,5 +101,5 @@ class TransactionActions(object): transaction.transaction_id, transaction.destination, response_code, - json.dumps(response_dict) + encode_canonical_json(response_dict) ) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0f9c82fd06..e36f0945ef 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -23,6 +23,7 @@ from synapse.api.errors import ( from 
synapse.api.constants import EventTypes, Membership, RejectedReason from synapse.util.logutils import log_function from synapse.util.async import run_on_reactor +from synapse.util.frozenutils import unfreeze from synapse.crypto.event_signing import ( compute_event_signature, add_hashes_and_signatures, ) @@ -311,9 +312,12 @@ class FederationHandler(BaseHandler): self.room_queues[room_id] = [] builder = self.event_builder_factory.new( - event.get_pdu_json() + unfreeze(event.get_pdu_json()) ) + logger.info("Builder: %s", builder.get_pdu_json()) + logger.info("Content: %s", content) + handled_events = set() try: @@ -324,14 +328,21 @@ class FederationHandler(BaseHandler): if not hasattr(event, "signatures"): builder.signatures = {} + logger.info("Content befhahs: %s", builder.content) + add_hashes_and_signatures( builder, self.hs.hostname, self.hs.config.signing_key[0], ) + logger.info("Content aftet hah: %s", builder.content) + logger.info("Content pdu json: %s", builder.get_pdu_json()) + new_event = builder.build() + logger.info("Content after build: %s", new_event.content) + # Try the host we successfully got a response to /make_join/ # request first. try: @@ -340,6 +351,7 @@ class FederationHandler(BaseHandler): except ValueError: pass + logger.info(new_event.content) ret = yield self.replication_layer.send_join( target_hosts, new_event @@ -485,6 +497,7 @@ class FederationHandler(BaseHandler): event.internal_metadata.outlier = False + logger.info(event.content) context = yield self._handle_new_event(origin, event) logger.debug( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 0369b907a5..c685991a9f 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -403,7 +403,9 @@ class RoomMemberHandler(BaseHandler): "membership": Membership.JOIN, "content": content, }) + logger.info("Before build: %s", builder.get_pdu_json()) event, context = yield self._create_new_client_event(builder) + logger.info("After build: %s", event.get_dict()) yield self._do_join(event, context, room_hosts=hosts, do_auth=True) @@ -462,7 +464,7 @@ class RoomMemberHandler(BaseHandler): room_hosts, room_id, event.user_id, - event.get_dict()["content"], # FIXME To get a non-frozen dict + event.content, # FIXME To get a non-frozen dict context ) else: diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 924ea89035..94603ef826 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -298,12 +298,16 @@ class DataStore(RoomMemberStore, RoomStore, or_replace=True, ) + content = encode_canonical_json( + event.content + ).decode("UTF-8") + vals = { "topological_ordering": event.depth, "event_id": event.event_id, "type": event.type, "room_id": event.room_id, - "content": json.dumps(event.get_dict()["content"]), + "content": content, "processed": True, "outlier": outlier, "depth": event.depth, @@ -323,7 +327,10 @@ class DataStore(RoomMemberStore, RoomStore, "prev_events", ] } - vals["unrecognized_keys"] = json.dumps(unrec) + + vals["unrecognized_keys"] = encode_canonical_json( + unrec + ).decode("UTF-8") try: self._simple_insert_txn( diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index a13a2015e4..9e10d37aec 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -21,6 +21,9 @@ def freeze(o): if t is dict: return frozendict({k: freeze(v) for k, v in o.items()}) + if t is frozendict: + return o + if t is str or t is unicode: return o @@ -33,10 +36,11 @@ def freeze(o): def unfreeze(o): - if isinstance(o, 
frozendict) or isinstance(o, dict): + t = type(o) + if t is dict or t is frozendict: return dict({k: unfreeze(v) for k, v in o.items()}) - if isinstance(o, basestring): + if t is str or t is unicode: return o try: -- cgit 1.5.1 From 7be0f6594e2a6dd7c3dc745eb856025276ec7d1f Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Wed, 11 Feb 2015 15:53:56 +0000 Subject: First step of making user_rooms_intersect() faster - implement in intersection logic in Python code terms of a DB query that is cacheable per user --- synapse/storage/roommember.py | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index c69dd995ce..d490a374e5 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -240,28 +240,30 @@ class RoomMemberStore(SQLBaseStore): results = self._parse_events_txn(txn, rows) return results + @defer.inlineCallbacks def user_rooms_intersect(self, user_id_list): """ Checks whether all the users whose IDs are given in a list share a room. + + This is a "hot path" function that's called a lot, e.g. by presence for + generating the event stream. """ - def interaction(txn): - user_list_clause = " OR ".join(["m.user_id = ?"] * len(user_id_list)) - sql = ( - "SELECT m.room_id FROM room_memberships as m " - "INNER JOIN current_state_events as c " - "ON m.event_id = c.event_id " - "WHERE m.membership = 'join' " - "AND (%(clause)s) " - # TODO(paul): We've got duplicate rows in the database somewhere - # so we have to DISTINCT m.user_id here - "GROUP BY m.room_id HAVING COUNT(DISTINCT m.user_id) = ?" - ) % {"clause": user_list_clause} - - args = list(user_id_list) - args.append(len(user_id_list)) + if len(user_id_list) < 2: + defer.returnValue(True) - txn.execute(sql, args) + deferreds = [ + self.get_rooms_for_user_where_membership_is( + u, membership_list=[Membership.JOIN], + ) + for u in user_id_list + ] + + results = yield defer.DeferredList(deferreds) + + # A list of sets of strings giving room IDs for each user + room_id_lists = [set([r.room_id for r in result[1]]) for result in results] - return len(txn.fetchall()) > 0 + # There isn't a setintersection(*list_of_sets) + ret = len(room_id_lists.pop(0).intersection(*room_id_lists)) > 0 - return self.runInteraction("user_rooms_intersect", interaction) + defer.returnValue(ret) -- cgit 1.5.1 From 45b56609ae84e7ffc3713335e7d9abc315ad1725 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Wed, 11 Feb 2015 16:04:08 +0000 Subject: Cache the result of a get_rooms_for_user query, to make user_rooms_intersect() much lighter in the read-common case --- synapse/storage/roommember.py | 41 ++++++++++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 7 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index d490a374e5..e05465bc13 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -35,6 +35,11 @@ RoomsForUser = namedtuple( class RoomMemberStore(SQLBaseStore): + def __init__(self, *args, **kw): + super(RoomMemberStore, self).__init__(*args, **kw) + + self._user_rooms_cache = {} + def _store_room_member_txn(self, txn, event): """Store a room member in the database. 
""" @@ -98,6 +103,8 @@ class RoomMemberStore(SQLBaseStore): txn.execute(sql, (event.room_id, domain)) + self.invalidate_rooms_for_user(target_user_id) + @defer.inlineCallbacks def get_room_member(self, user_id, room_id): """Retrieve the current state of a room member. @@ -240,23 +247,43 @@ class RoomMemberStore(SQLBaseStore): results = self._parse_events_txn(txn, rows) return results + # TODO(paul): Create a nice @cached decorator to do this + # @cached + # def get_foo(...) + # ... + # invalidate_foo = get_foo.invalidator + + @defer.inlineCallbacks + def get_rooms_for_user(self, user_id): + # TODO(paul): put some performance counters in here so we can easily + # track what impact this cache is having + if user_id in self._user_rooms_cache: + defer.returnValue(self._user_rooms_cache[user_id]) + + rooms = yield self.get_rooms_for_user_where_membership_is( + user_id, membership_list=[Membership.JOIN], + ) + + self._user_rooms_cache[user_id] = rooms + defer.returnValue(rooms) + + def invalidate_rooms_for_user(self, user_id): + if user_id in self._user_rooms_cache: + del self._user_rooms_cache[user_id] + @defer.inlineCallbacks def user_rooms_intersect(self, user_id_list): """ Checks whether all the users whose IDs are given in a list share a room. This is a "hot path" function that's called a lot, e.g. by presence for - generating the event stream. + generating the event stream. As such, it is implemented locally by + wrapping logic around heavily-cached database queries. """ if len(user_id_list) < 2: defer.returnValue(True) - deferreds = [ - self.get_rooms_for_user_where_membership_is( - u, membership_list=[Membership.JOIN], - ) - for u in user_id_list - ] + deferreds = [self.get_rooms_for_user(u) for u in user_id_list] results = yield defer.DeferredList(deferreds) -- cgit 1.5.1 From 7f47ba7383302fdbdaa3a10abef00d3710c77fce Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Wed, 11 Feb 2015 16:18:21 +0000 Subject: Added another TODO note --- synapse/storage/roommember.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index e05465bc13..779f9ce544 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -264,6 +264,9 @@ class RoomMemberStore(SQLBaseStore): user_id, membership_list=[Membership.JOIN], ) + # TODO(paul): Consider applying a maximum size; just evict things at + # random, or consider LRU? + self._user_rooms_cache[user_id] = rooms defer.returnValue(rooms) -- cgit 1.5.1 From 7eef84a95b53318e97ab74dd25fee584714a8e6a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Feb 2015 16:52:22 +0000 Subject: pyflakes --- synapse/events/__init__.py | 2 +- synapse/storage/__init__.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index f4ec8cd18c..64e08223b0 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.util.frozenutils import freeze, unfreeze +from synapse.util.frozenutils import freeze class _EventInternalMetadata(object): diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 94603ef826..a0284d54b9 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -44,7 +44,6 @@ from syutil.jsonutil import encode_canonical_json from synapse.crypto.event_signing import compute_event_reference_hash -import simplejson as json import logging import os -- cgit 1.5.1 From 8bbdf328498ef9a7e6c46e565195783b208bfeb2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Feb 2015 18:56:13 +0000 Subject: Convert get_rooms to use runInteraction so the transacion has a more helpful description --- synapse/storage/room.py | 67 +++++++++++++++++++++++++++---------------------- 1 file changed, 37 insertions(+), 30 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 6542f8e4f8..750b17a45f 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -82,38 +82,45 @@ class RoomStore(SQLBaseStore): "topic" key if one is set, and a "name" key if one is set """ - topic_subquery = ( - "SELECT topics.event_id as event_id, " - "topics.room_id as room_id, topic " - "FROM topics " - "INNER JOIN current_state_events as c " - "ON c.event_id = topics.event_id " - ) + def f(txn): + topic_subquery = ( + "SELECT topics.event_id as event_id, " + "topics.room_id as room_id, topic " + "FROM topics " + "INNER JOIN current_state_events as c " + "ON c.event_id = topics.event_id " + ) - name_subquery = ( - "SELECT room_names.event_id as event_id, " - "room_names.room_id as room_id, name " - "FROM room_names " - "INNER JOIN current_state_events as c " - "ON c.event_id = room_names.event_id " - ) + name_subquery = ( + "SELECT room_names.event_id as event_id, " + "room_names.room_id as room_id, name " + "FROM room_names " + "INNER JOIN current_state_events as c " + "ON c.event_id = room_names.event_id " + ) - # We use non printing ascii character US () as a seperator - sql = ( - "SELECT r.room_id, n.name, t.topic, " - "group_concat(a.room_alias, '') " - "FROM rooms AS r " - "LEFT JOIN (%(topic)s) AS t ON t.room_id = r.room_id " - "LEFT JOIN (%(name)s) AS n ON n.room_id = r.room_id " - "INNER JOIN room_aliases AS a ON a.room_id = r.room_id " - "WHERE r.is_public = ? " - "GROUP BY r.room_id " - ) % { - "topic": topic_subquery, - "name": name_subquery, - } - - rows = yield self._execute(None, sql, is_public) + # We use non printing ascii character US () as a seperator + sql = ( + "SELECT r.room_id, n.name, t.topic, " + "group_concat(a.room_alias, '') " + "FROM rooms AS r " + "LEFT JOIN (%(topic)s) AS t ON t.room_id = r.room_id " + "LEFT JOIN (%(name)s) AS n ON n.room_id = r.room_id " + "INNER JOIN room_aliases AS a ON a.room_id = r.room_id " + "WHERE r.is_public = ? 
" + "GROUP BY r.room_id " + ) % { + "topic": topic_subquery, + "name": name_subquery, + } + + c = txn.execute(sql, (is_public,)) + + return c.fetchall() + + rows = yield self.runInteraction( + "get_rooms", f + ) ret = [ { -- cgit 1.5.1 From 83d41f25d8c57a2fa3a5feed4b952690abbd57a4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Feb 2015 10:05:47 +0000 Subject: Set database schema version in delta --- synapse/storage/schema/delta/v12.sql | 2 ++ 1 file changed, 2 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/schema/delta/v12.sql b/synapse/storage/schema/delta/v12.sql index 302d958dbf..b87ef1fe79 100644 --- a/synapse/storage/schema/delta/v12.sql +++ b/synapse/storage/schema/delta/v12.sql @@ -63,3 +63,5 @@ CREATE TABLE IF NOT EXISTS user_filters( CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters( user_id, filter_id ); + +PRAGMA user_version = 12; -- cgit 1.5.1 From 717687e1fc61a0fa441ff0dee2d49fda518c06bc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Feb 2015 14:39:31 +0000 Subject: Get an auth query one at a time --- synapse/storage/event_federation.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 0cbcdd1b55..3fbc090224 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -55,17 +55,16 @@ class EventFederationStore(SQLBaseStore): results = set() base_sql = ( - "SELECT auth_id FROM event_auth WHERE %s" + "SELECT auth_id FROM event_auth WHERE event_id = ?" ) front = set(event_ids) while front: - sql = base_sql % ( - " OR ".join(["event_id=?"] * len(front)), - ) - - txn.execute(sql, list(front)) - front = [r[0] for r in txn.fetchall()] + new_front = set() + for f in front: + txn.execute(base_sql, (f,)) + new_front.update([r[0] for r in txn.fetchall()]) + front = new_front results.update(front) return list(results) -- cgit 1.5.1 From 183b3d4e47dd2fc0e0ca88714d0e0bd415f81736 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 13 Feb 2015 14:29:49 +0000 Subject: Prepare the database whenever a connection is opened from the db_pool so that in-memory databases will work --- synapse/app/homeserver.py | 14 ++++---------- synapse/storage/__init__.py | 3 +++ 2 files changed, 7 insertions(+), 10 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index f5681fac20..6f39819d3a 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -90,7 +90,9 @@ class SynapseHomeServer(HomeServer): "sqlite3", self.get_db_name(), check_same_thread=False, cp_min=1, - cp_max=1 + cp_max=1, + cp_openfun=prepare_database, # Prepare the database for each conn + # so that :memory: sqlite works ) def create_resource_tree(self, web_client, redirect_root_to_web_client): @@ -252,14 +254,6 @@ def setup(): logger.info("Database prepared in %s.", db_name) - db_pool = hs.get_db_pool() - - if db_name == ":memory:": - # Memory databases will need to be setup each time they are opened. 
- reactor.callWhenRunning( - db_pool.runWithConnection, prepare_database - ) - if config.manhole: f = twisted.manhole.telnet.ShellFactory() f.username = "matrix" @@ -270,10 +264,10 @@ def setup(): bind_port = config.bind_port if config.no_tls: bind_port = None + hs.start_listening(bind_port, config.unsecure_port) hs.get_pusherpool().start() - hs.get_state_handler().start_caching() hs.get_datastore().start_profiling() diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 02b1f06854..1c22e19ab0 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -637,10 +637,13 @@ def prepare_database(db_conn): c.executescript(sql_script) db_conn.commit() + else: + logger.info("Database is at version %r", user_version) else: sql_script = "BEGIN TRANSACTION;\n" for sql_loc in SCHEMAS: + logger.debug("Applying schema %r", sql_loc) sql_script += read_schema(sql_loc) sql_script += "\n" sql_script += "COMMIT TRANSACTION;" -- cgit 1.5.1 From 8d5cce62ab4bbb2333f01abb9edf7e94f6d6f7d4 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 13 Feb 2015 16:16:16 +0000 Subject: Update pushers by app id and pushkey, not user id and pushkey --- synapse/push/__init__.py | 12 ++++++------ synapse/storage/pusher.py | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 0659a1cb9b..0fb3e4f7f3 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -237,7 +237,7 @@ class Pusher(object): self.user_name, config, timeout=0) self.last_token = chunk['end'] self.store.update_pusher_last_token( - self.user_name, self.pushkey, self.last_token) + self.app_id, self.pushkey, self.last_token) logger.info("Pusher %s for user %s starting from token %s", self.pushkey, self.user_name, self.last_token) @@ -308,7 +308,7 @@ class Pusher(object): self.backoff_delay = Pusher.INITIAL_BACKOFF self.last_token = chunk['end'] self.store.update_pusher_last_token_and_success( - self.user_name, + self.app_id, self.pushkey, self.last_token, self.clock.time_msec() @@ -316,14 +316,14 @@ class Pusher(object): if self.failing_since: self.failing_since = None self.store.update_pusher_failing_since( - self.user_name, + self.app_id, self.pushkey, self.failing_since) else: if not self.failing_since: self.failing_since = self.clock.time_msec() self.store.update_pusher_failing_since( - self.user_name, + self.app_id, self.pushkey, self.failing_since ) @@ -340,14 +340,14 @@ class Pusher(object): self.backoff_delay = Pusher.INITIAL_BACKOFF self.last_token = chunk['end'] self.store.update_pusher_last_token( - self.user_name, + self.app_id, self.pushkey, self.last_token ) self.failing_since = None self.store.update_pusher_failing_since( - self.user_name, + self.app_id, self.pushkey, self.failing_since ) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index e2a662a6c7..6622b4d18a 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -126,27 +126,27 @@ class PusherStore(SQLBaseStore): ) @defer.inlineCallbacks - def update_pusher_last_token(self, user_name, pushkey, last_token): + def update_pusher_last_token(self, app_id, pushkey, last_token): yield self._simple_update_one( PushersTable.table_name, - {'user_name': user_name, 'pushkey': pushkey}, + {'app_id': app_id, 'pushkey': pushkey}, {'last_token': last_token} ) @defer.inlineCallbacks - def update_pusher_last_token_and_success(self, user_name, pushkey, + def update_pusher_last_token_and_success(self, 
app_id, pushkey, last_token, last_success):
         yield self._simple_update_one(
             PushersTable.table_name,
-            {'user_name': user_name, 'pushkey': pushkey},
+            {'app_id': app_id, 'pushkey': pushkey},
             {'last_token': last_token, 'last_success': last_success}
         )
 
     @defer.inlineCallbacks
-    def update_pusher_failing_since(self, user_name, pushkey, failing_since):
+    def update_pusher_failing_since(self, app_id, pushkey, failing_since):
         yield self._simple_update_one(
             PushersTable.table_name,
-            {'user_name': user_name, 'pushkey': pushkey},
+            {'app_id': app_id, 'pushkey': pushkey},
             {'failing_since': failing_since}
         )
-- cgit 1.5.1


From 72a4de2ce627528a13bda480403344ffde6275d3 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 17 Feb 2015 10:03:23 +0000
Subject: Use consumeErrors=True on all DeferredLists.

This is so that the DeferredLists actually consume the error instead of
propagating down the non-existent errback chain. This should reduce the number
of unhandled errors we are seeing.
---
 synapse/federation/federation_server.py | 2 +-
 synapse/federation/transaction_queue.py | 2 +-
 synapse/handlers/presence.py            | 8 ++++----
 synapse/notifier.py                     | 6 ++++--
 synapse/storage/roommember.py           | 2 +-
 5 files changed, 11 insertions(+), 9 deletions(-)

(limited to 'synapse/storage')

diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 9f5c98694c..e94d0411b4 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -124,7 +124,7 @@ class FederationServer(FederationBase):
                 edu.content
             )
 
-        results = yield defer.DeferredList(dl)
+        results = yield defer.DeferredList(dl, consumeErrors=True)
 
         ret = []
         for r in results:
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py
index 731019ad9f..bb20f2ebab 100644
--- a/synapse/federation/transaction_queue.py
+++ b/synapse/federation/transaction_queue.py
@@ -98,7 +98,7 @@ class TransactionQueue(object):
 
             deferreds.append(deferred)
 
-        yield defer.DeferredList(deferreds)
+        yield defer.DeferredList(deferreds, consumeErrors=True)
 
     # NO inlineCallbacks
     def enqueue_edu(self, edu):
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 59287010ed..8ef248ecf2 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -492,7 +492,7 @@ class PresenceHandler(BaseHandler):
                 user, domain, remoteusers
             ))
 
-        yield defer.DeferredList(deferreds)
+        yield defer.DeferredList(deferreds, consumeErrors=True)
 
     def _start_polling_local(self, user, target_user):
         target_localpart = target_user.localpart
@@ -548,7 +548,7 @@ class PresenceHandler(BaseHandler):
             self._stop_polling_remote(user, domain, remoteusers)
         )
 
-        return defer.DeferredList(deferreds)
+        return defer.DeferredList(deferreds, consumeErrors=True)
 
     def _stop_polling_local(self, user, target_user):
         for localpart in self._local_pushmap.keys():
@@ -729,7 +729,7 @@ class PresenceHandler(BaseHandler):
             del self._remote_sendmap[user]
 
         with PreserveLoggingContext():
-            yield defer.DeferredList(deferreds)
+            yield defer.DeferredList(deferreds, consumeErrors=True)
 
     @defer.inlineCallbacks
     def push_update_to_local_and_remote(self, observed_user, statuscache,
@@ -768,7 +768,7 @@ class PresenceHandler(BaseHandler):
             )
         )
 
-        yield defer.DeferredList(deferreds)
+        yield defer.DeferredList(deferreds, consumeErrors=True)
 
         defer.returnValue((localusers, remote_domains))
 
diff --git a/synapse/notifier.py b/synapse/notifier.py
index e3b6ead620..f5a394596d 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -135,7 +135,8 @@ class Notifier(object): with PreserveLoggingContext(): yield defer.DeferredList( - [notify(l).addErrback(eb) for l in listeners] + [notify(l).addErrback(eb) for l in listeners], + consumeErrors=True, ) @defer.inlineCallbacks @@ -203,7 +204,8 @@ class Notifier(object): with PreserveLoggingContext(): yield defer.DeferredList( - [notify(l).addErrback(eb) for l in listeners] + [notify(l).addErrback(eb) for l in listeners], + consumeErrors=True, ) @defer.inlineCallbacks diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 779f9ce544..9bf608bc90 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -288,7 +288,7 @@ class RoomMemberStore(SQLBaseStore): deferreds = [self.get_rooms_for_user(u) for u in user_id_list] - results = yield defer.DeferredList(deferreds) + results = yield defer.DeferredList(deferreds, consumeErrors=True) # A list of sets of strings giving room IDs for each user room_id_lists = [set([r.room_id for r in result[1]]) for result in results] -- cgit 1.5.1 From 1a989c436cda4926f11b04b4f26d83e7d3ce9ef5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 17 Feb 2015 15:45:55 +0000 Subject: Bump schema version --- synapse/storage/__init__.py | 2 +- synapse/storage/schema/delta/v13.sql | 34 ++++++++++++++++++++++++++++++++++ synapse/storage/schema/delta/v14.sql | 34 ---------------------------------- 3 files changed, 35 insertions(+), 35 deletions(-) create mode 100644 synapse/storage/schema/delta/v13.sql delete mode 100644 synapse/storage/schema/delta/v14.sql (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index c6e96b842f..ec701014a9 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -74,7 +74,7 @@ SCHEMAS = [ # Remember to update this number every time an incompatible change is made to # database schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 12 +SCHEMA_VERSION = 13 class _RollbackButIsFineException(Exception): diff --git a/synapse/storage/schema/delta/v13.sql b/synapse/storage/schema/delta/v13.sql new file mode 100644 index 0000000000..e491ad5aec --- /dev/null +++ b/synapse/storage/schema/delta/v13.sql @@ -0,0 +1,34 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE IF NOT EXISTS application_services( + id INTEGER PRIMARY KEY AUTOINCREMENT, + url TEXT, + token TEXT, + hs_token TEXT, + sender TEXT, + UNIQUE(token) ON CONFLICT ROLLBACK +); + +CREATE TABLE IF NOT EXISTS application_services_regex( + id INTEGER PRIMARY KEY AUTOINCREMENT, + as_id INTEGER NOT NULL, + namespace INTEGER, /* enum[room_id|room_alias|user_id] */ + regex TEXT, + FOREIGN KEY(as_id) REFERENCES application_services(id) +); + + + diff --git a/synapse/storage/schema/delta/v14.sql b/synapse/storage/schema/delta/v14.sql deleted file mode 100644 index e491ad5aec..0000000000 --- a/synapse/storage/schema/delta/v14.sql +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS application_services( - id INTEGER PRIMARY KEY AUTOINCREMENT, - url TEXT, - token TEXT, - hs_token TEXT, - sender TEXT, - UNIQUE(token) ON CONFLICT ROLLBACK -); - -CREATE TABLE IF NOT EXISTS application_services_regex( - id INTEGER PRIMARY KEY AUTOINCREMENT, - as_id INTEGER NOT NULL, - namespace INTEGER, /* enum[room_id|room_alias|user_id] */ - regex TEXT, - FOREIGN KEY(as_id) REFERENCES application_services(id) -); - - - -- cgit 1.5.1 From 2c29ed3e847285973ab552b9617750e1ba6693e0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 17 Feb 2015 17:22:24 +0000 Subject: Use absolute path when loading delta sql files --- synapse/storage/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index ec701014a9..d16e7b8fac 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -76,6 +76,8 @@ SCHEMAS = [ # database schema files, so the users will be informed on server restarts. SCHEMA_VERSION = 13 +dir_path = os.path.abspath(os.path.dirname(__file__)) + class _RollbackButIsFineException(Exception): """ This exception is used to rollback a transaction without implying @@ -583,7 +585,6 @@ def schema_path(schema): A filesystem path pointing at a ".sql" file. 
""" - dir_path = os.path.dirname(__file__) schemaPath = os.path.join(dir_path, "schema", schema + ".sql") return schemaPath -- cgit 1.5.1 From 61959928bb4b6b0191d301f1b267af7290a61bd2 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Thu, 19 Feb 2015 14:58:07 +0000 Subject: Pull out the 'get_rooms_for_user' cache logic into a reüsable @cached decorator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- synapse/storage/roommember.py | 53 +++++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 24 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 9bf608bc90..569bd55d0f 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -33,6 +33,32 @@ RoomsForUser = namedtuple( ) +# TODO(paul): +# * Move this somewhere higher-level, shared; +# * more generic key management +# * export monitoring stats +# * maximum size; just evict things at random, or consider LRU? +def cached(orig): + cache = {} + + @defer.inlineCallbacks + def wrapped(self, key): + if key in cache: + defer.returnValue(cache[key]) + + ret = yield orig(self, key) + + cache[key] = ret; + defer.returnValue(ret) + + def invalidate(key): + if key in cache: + del cache[key] + + wrapped.invalidate = invalidate + return wrapped + + class RoomMemberStore(SQLBaseStore): def __init__(self, *args, **kw): @@ -103,7 +129,7 @@ class RoomMemberStore(SQLBaseStore): txn.execute(sql, (event.room_id, domain)) - self.invalidate_rooms_for_user(target_user_id) + self.get_rooms_for_user.invalidate(target_user_id) @defer.inlineCallbacks def get_room_member(self, user_id, room_id): @@ -247,33 +273,12 @@ class RoomMemberStore(SQLBaseStore): results = self._parse_events_txn(txn, rows) return results - # TODO(paul): Create a nice @cached decorator to do this - # @cached - # def get_foo(...) - # ... - # invalidate_foo = get_foo.invalidator - - @defer.inlineCallbacks + @cached def get_rooms_for_user(self, user_id): - # TODO(paul): put some performance counters in here so we can easily - # track what impact this cache is having - if user_id in self._user_rooms_cache: - defer.returnValue(self._user_rooms_cache[user_id]) - - rooms = yield self.get_rooms_for_user_where_membership_is( + return self.get_rooms_for_user_where_membership_is( user_id, membership_list=[Membership.JOIN], ) - # TODO(paul): Consider applying a maximum size; just evict things at - # random, or consider LRU? 
- - self._user_rooms_cache[user_id] = rooms - defer.returnValue(rooms) - - def invalidate_rooms_for_user(self, user_id): - if user_id in self._user_rooms_cache: - del self._user_rooms_cache[user_id] - @defer.inlineCallbacks def user_rooms_intersect(self, user_id_list): """ Checks whether all the users whose IDs are given in a list share a -- cgit 1.5.1 From 0ac2a79faa918280767c18e4db7ec29d7d3a3afb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Feb 2015 17:24:14 +0000 Subject: Initial stab at implementing a batched get_missing_pdus request --- synapse/federation/federation_server.py | 72 +++++++++++++++++++++++++++++++++ synapse/handlers/federation.py | 9 +++-- synapse/storage/event_federation.py | 63 ++++++++++++++++++++++++++--- 3 files changed, 135 insertions(+), 9 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 22b9663831..34bc397e8a 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -305,6 +305,78 @@ class FederationServer(FederationBase): (200, send_content) ) + @defer.inlineCallbacks + def get_missing_events(self, origin, room_id, earliest_events, + latest_events, limit, min_depth): + limit = max(limit, 50) + min_depth = max(min_depth, 0) + + missing_events = yield self.store.get_missing_events( + room_id=room_id, + earliest_events=earliest_events, + latest_events=latest_events, + limit=limit, + min_depth=min_depth, + ) + + known_ids = {e.event_id for e in missing_events} | {earliest_events} + + back_edges = { + e for e in missing_events + if {i for i, h in e.prev_events.items()} <= known_ids + } + + decoded_auth_events = set() + state = {} + auth_events = set() + auth_and_state = {} + for event in back_edges: + state_pdus = yield self.handler.get_state_for_pdu( + origin, room_id, event.event_id, + do_auth=False, + ) + + state[event.event_id] = [s.event_id for s in state_pdus] + + auth_and_state.update({ + s.event_id: s for s in state_pdus + }) + + state_ids = {pdu.event_id for pdu in state_pdus} + prev_ids = {i for i, h in event.prev_events.items()} + partial_auth_chain = yield self.store.get_auth_chain( + state_ids | prev_ids, have_ids=decoded_auth_events.keys() + ) + + for p in partial_auth_chain: + p.signatures.update( + compute_event_signature( + p, + self.hs.hostname, + self.hs.config.signing_key[0] + ) + ) + + auth_events.update( + a.event_id for a in partial_auth_chain + ) + + auth_and_state.update({ + a.event_id: a for a in partial_auth_chain + }) + + time_now = self._clock.time_msec() + + defer.returnValue({ + "events": [ev.get_pdu_json(time_now) for ev in missing_events], + "state_for_events": state, + "auth_events": auth_events, + "event_map": { + k: ev.get_pdu_json(time_now) + for k, ev in auth_and_state.items() + }, + }) + @log_function def _get_persisted_pdu(self, origin, event_id, do_auth=True): """ Get a PDU from the database with given origin and id. 
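The storage-side walk that backs this endpoint (shown in the event_federation.py hunk further down) is a breadth-first traversal of prev_events edges. A rough, self-contained sketch of the idea, where `prev_ids_of` is a hypothetical stand-in for the event_edges query:

def walk_missing_events(prev_ids_of, earliest_events, latest_events, limit):
    """Collect up to `limit` event IDs reachable backwards from
    latest_events, stopping at anything in earliest_events."""
    earliest = set(earliest_events)
    front = set(latest_events) - earliest
    found = set()
    while front and len(found) < limit:
        new_front = set()
        for event_id in front:
            new_front.update(prev_ids_of(event_id))
        new_front -= earliest   # the caller already has these
        new_front -= found      # avoid revisiting events
        front = new_front
        found |= new_front
    return found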
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0eb2ff95ca..26bdc6d1a7 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -581,12 +581,13 @@ class FederationHandler(BaseHandler): defer.returnValue(event) @defer.inlineCallbacks - def get_state_for_pdu(self, origin, room_id, event_id): + def get_state_for_pdu(self, origin, room_id, event_id, do_auth=True): yield run_on_reactor() - in_room = yield self.auth.check_host_in_room(room_id, origin) - if not in_room: - raise AuthError(403, "Host not in room.") + if do_auth: + in_room = yield self.auth.check_host_in_room(room_id, origin) + if not in_room: + raise AuthError(403, "Host not in room.") state_groups = yield self.store.get_state_groups( [event_id] diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 3fbc090224..22bf7ad832 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -32,15 +32,15 @@ class EventFederationStore(SQLBaseStore): and backfilling from another server respectively. """ - def get_auth_chain(self, event_ids): + def get_auth_chain(self, event_ids, have_ids=set()): return self.runInteraction( "get_auth_chain", self._get_auth_chain_txn, - event_ids + event_ids, have_ids ) - def _get_auth_chain_txn(self, txn, event_ids): - results = self._get_auth_chain_ids_txn(txn, event_ids) + def _get_auth_chain_txn(self, txn, event_ids, have_ids): + results = self._get_auth_chain_ids_txn(txn, event_ids, have_ids) return self._get_events_txn(txn, results) @@ -51,8 +51,9 @@ class EventFederationStore(SQLBaseStore): event_ids ) - def _get_auth_chain_ids_txn(self, txn, event_ids): + def _get_auth_chain_ids_txn(self, txn, event_ids, have_ids): results = set() + have_ids = set(have_ids) base_sql = ( "SELECT auth_id FROM event_auth WHERE event_id = ?" @@ -64,6 +65,10 @@ class EventFederationStore(SQLBaseStore): for f in front: txn.execute(base_sql, (f,)) new_front.update([r[0] for r in txn.fetchall()]) + + new_front -= results + new_front -= have_ids + front = new_front results.update(front) @@ -378,3 +383,51 @@ class EventFederationStore(SQLBaseStore): event_results += new_front return self._get_events_txn(txn, event_results) + + def get_missing_events(self, room_id, earliest_events, latest_events, + limit, min_depth): + return self.runInteraction( + "get_missing_events", + self._get_missing_events, + room_id, earliest_events, latest_events, limit, min_depth + ) + + def _get_missing_events(self, txn, room_id, earliest_events, latest_events, + limit, min_depth): + + earliest_events = set(earliest_events) + front = set(latest_events) - earliest_events + + event_results = set() + + query = ( + "SELECT prev_event_id FROM event_edges " + "WHERE room_id = ? AND event_id = ? AND is_state = 0 " + "LIMIT ?" 
+ ) + + while front and len(event_results) < limit: + new_front = set() + for event_id in front: + txn.execute( + query, + (room_id, event_id, limit - len(event_results)) + ) + + for e_id, in txn.fetchall(): + new_front.add(e_id) + + new_front -= earliest_events + new_front -= event_results + + front = new_front + event_results |= new_front + + events = self._get_events_txn(txn, event_results) + + events = sorted( + [ev for ev in events if ev.depth >= min_depth], + key=lambda e: e.depth, + ) + + return events[:limit] -- cgit 1.5.1 From 077d20034278ea57c57d501de11bfb1f0c7f9603 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Thu, 19 Feb 2015 17:29:39 +0000 Subject: Move @cached decorator out into synapse.storage._base; add minimal docs --- synapse/storage/_base.py | 35 +++++++++++++++++++++++++++++++++++ synapse/storage/roommember.py | 28 +--------------------------- 2 files changed, 36 insertions(+), 27 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index be9934c66f..fd275039be 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -35,6 +35,41 @@ sql_logger = logging.getLogger("synapse.storage.SQL") transaction_logger = logging.getLogger("synapse.storage.txn") +# TODO(paul): +# * Move this somewhere higher-level, shared; +# * more generic key management +# * export monitoring stats +# * maximum size; just evict things at random, or consider LRU? +def cached(orig): + """ A method decorator that applies a memoizing cache around the function. + + The function is presumed to take one additional argument, which is used as + the key for the cache. Cache hits are served directly from the cache; + misses use the function body to generate the value. + + The wrapped function has an additional member, a callable called + "invalidate". This can be used to remove individual entries from the cache. + """ + cache = {} + + @defer.inlineCallbacks + def wrapped(self, key): + if key in cache: + defer.returnValue(cache[key]) + + ret = yield orig(self, key) + + cache[key] = ret; + defer.returnValue(ret) + + def invalidate(key): + if key in cache: + del cache[key] + + wrapped.invalidate = invalidate + return wrapped + + class LoggingTransaction(object): """An object that almost-transparently proxies for the 'txn' object passed to the constructor. Adds logging to the .execute() method.""" diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 569bd55d0f..b8fcc1927e 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -17,7 +17,7 @@ from twisted.internet import defer from collections import namedtuple -from ._base import SQLBaseStore +from ._base import SQLBaseStore, cached from synapse.api.constants import Membership from synapse.types import UserID @@ -33,32 +33,6 @@ RoomsForUser = namedtuple( ) -# TODO(paul): -# * Move this somewhere higher-level, shared; -# * more generic key management -# * export monitoring stats -# * maximum size; just evict things at random, or consider LRU? 
-def cached(orig): - cache = {} - - @defer.inlineCallbacks - def wrapped(self, key): - if key in cache: - defer.returnValue(cache[key]) - - ret = yield orig(self, key) - - cache[key] = ret; - defer.returnValue(ret) - - def invalidate(key): - if key in cache: - del cache[key] - - wrapped.invalidate = invalidate - return wrapped - - class RoomMemberStore(SQLBaseStore): def __init__(self, *args, **kw): -- cgit 1.5.1 From ebc3db295bfe6d0c43bf45b8fcd7fa6bbc429375 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Thu, 19 Feb 2015 18:36:02 +0000 Subject: Take named arguments to @cached() decorator, add a 'max_entries' limit --- synapse/storage/_base.py | 39 +++++++++++-------- synapse/storage/roommember.py | 2 +- tests/storage/test__base.py | 89 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 114 insertions(+), 16 deletions(-) create mode 100644 tests/storage/test__base.py (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index fd275039be..61657d36ed 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -39,8 +39,8 @@ transaction_logger = logging.getLogger("synapse.storage.txn") # * Move this somewhere higher-level, shared; # * more generic key management # * export monitoring stats -# * maximum size; just evict things at random, or consider LRU? -def cached(orig): +# * consider other eviction strategies - LRU? +def cached(max_entries=1000): """ A method decorator that applies a memoizing cache around the function. The function is presumed to take one additional argument, which is used as @@ -50,24 +50,33 @@ def cached(orig): The wrapped function has an additional member, a callable called "invalidate". This can be used to remove individual entries from the cache. """ - cache = {} + def wrap(orig): + cache = {} - @defer.inlineCallbacks - def wrapped(self, key): - if key in cache: - defer.returnValue(cache[key]) + @defer.inlineCallbacks + def wrapped(self, key): + if key in cache: + defer.returnValue(cache[key]) + + ret = yield orig(self, key) + + while len(cache) > max_entries: + # TODO(paul): This feels too biased. However, a random index + # would be a bit inefficient, walking the list of keys just + # to ignore most of them? + del cache[cache.keys()[0]] - ret = yield orig(self, key) + cache[key] = ret; + defer.returnValue(ret) - cache[key] = ret; - defer.returnValue(ret) + def invalidate(key): + if key in cache: + del cache[key] - def invalidate(key): - if key in cache: - del cache[key] + wrapped.invalidate = invalidate + return wrapped - wrapped.invalidate = invalidate - return wrapped + return wrap class LoggingTransaction(object): diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index b8fcc1927e..33a832483e 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -247,7 +247,7 @@ class RoomMemberStore(SQLBaseStore): results = self._parse_events_txn(txn, rows) return results - @cached + @cached() def get_rooms_for_user(self, user_id): return self.get_rooms_for_user_where_membership_is( user_id, membership_list=[Membership.JOIN], diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py new file mode 100644 index 0000000000..057f798640 --- /dev/null +++ b/tests/storage/test__base.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from tests import unittest +from twisted.internet import defer + +from synapse.storage._base import cached + + +class CacheDecoratorTestCase(unittest.TestCase): + + @defer.inlineCallbacks + def test_passthrough(self): + @cached() + def func(self, key): + return key + + self.assertEquals((yield func(self, "foo")), "foo") + self.assertEquals((yield func(self, "bar")), "bar") + + @defer.inlineCallbacks + def test_hit(self): + callcount = [0] + + @cached() + def func(self, key): + callcount[0] += 1 + return key + + yield func(self, "foo") + + self.assertEquals(callcount[0], 1) + + self.assertEquals((yield func(self, "foo")), "foo") + self.assertEquals(callcount[0], 1) + + @defer.inlineCallbacks + def test_invalidate(self): + callcount = [0] + + @cached() + def func(self, key): + callcount[0] += 1 + return key + + yield func(self, "foo") + + self.assertEquals(callcount[0], 1) + + func.invalidate("foo") + + yield func(self, "foo") + + self.assertEquals(callcount[0], 2) + + @defer.inlineCallbacks + def test_max_entries(self): + callcount = [0] + + @cached(max_entries=10) + def func(self, key): + callcount[0] += 1 + return key + + for k in range(0,12): + yield func(self, k) + + self.assertEquals(callcount[0], 12) + + # There must have been at least 2 evictions, meaning if we calculate + # all 12 values again, we must get called at least 2 more times + for k in range(0,12): + yield func(self, k) + + self.assertTrue(callcount[0] >= 14, + msg="Expected callcount >= 14, got %d" % (callcount[0])) -- cgit 1.5.1 From 55022d6ca5bfe3e99fd3144e291906063885ce12 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Thu, 19 Feb 2015 18:38:09 +0000 Subject: Remove a TODO note --- synapse/storage/_base.py | 1 - 1 file changed, 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 61657d36ed..78ba5f25ea 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -36,7 +36,6 @@ transaction_logger = logging.getLogger("synapse.storage.txn") # TODO(paul): -# * Move this somewhere higher-level, shared; # * more generic key management # * export monitoring stats # * consider other eviction strategies - LRU? -- cgit 1.5.1 From 42b972bccd0cf7d903befb498f9c1bbd5c4e6583 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 23 Feb 2015 14:35:23 +0000 Subject: Revert get_auth_chain changes --- synapse/storage/event_federation.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 22bf7ad832..2deda8ac50 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -32,15 +32,15 @@ class EventFederationStore(SQLBaseStore): and backfilling from another server respectively. 
""" - def get_auth_chain(self, event_ids, have_ids=set()): + def get_auth_chain(self, event_ids): return self.runInteraction( "get_auth_chain", self._get_auth_chain_txn, - event_ids, have_ids + event_ids ) - def _get_auth_chain_txn(self, txn, event_ids, have_ids): - results = self._get_auth_chain_ids_txn(txn, event_ids, have_ids) + def _get_auth_chain_txn(self, txn, event_ids): + results = self._get_auth_chain_ids_txn(txn, event_ids) return self._get_events_txn(txn, results) @@ -51,9 +51,8 @@ class EventFederationStore(SQLBaseStore): event_ids ) - def _get_auth_chain_ids_txn(self, txn, event_ids, have_ids): + def _get_auth_chain_ids_txn(self, txn, event_ids): results = set() - have_ids = set(have_ids) base_sql = ( "SELECT auth_id FROM event_auth WHERE event_id = ?" @@ -67,7 +66,6 @@ class EventFederationStore(SQLBaseStore): new_front.update([r[0] for r in txn.fetchall()]) new_front -= results - new_front -= have_ids front = new_front results.update(front) -- cgit 1.5.1 From 4631b737fdb08185d514e69f0e6860c0860768b5 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Mon, 23 Feb 2015 14:38:44 +0000 Subject: Squash out the now-redundant ApplicationServicesCache object class --- synapse/storage/appservice.py | 28 ++++++++-------------------- 1 file changed, 8 insertions(+), 20 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index d941b1f387..dc3666efd4 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -23,23 +23,11 @@ from ._base import SQLBaseStore logger = logging.getLogger(__name__) -class ApplicationServiceCache(object): - """Caches ApplicationServices and provides utility functions on top. - - This class is designed to be invoked on incoming events in order to avoid - hammering the database every time to extract a list of application service - regexes. - """ - - def __init__(self): - self.services = [] - - class ApplicationServiceStore(SQLBaseStore): def __init__(self, hs): super(ApplicationServiceStore, self).__init__(hs) - self.cache = ApplicationServiceCache() + self.services_cache = [] self.cache_defer = self._populate_cache() @defer.inlineCallbacks @@ -56,7 +44,7 @@ class ApplicationServiceStore(SQLBaseStore): token, ) # update cache TODO: Should this be in the txn? - for service in self.cache.services: + for service in self.services_cache: if service.token == token: service.url = None service.namespaces = None @@ -110,13 +98,13 @@ class ApplicationServiceStore(SQLBaseStore): ) # update cache TODO: Should this be in the txn? 
- for (index, cache_service) in enumerate(self.cache.services): + for (index, cache_service) in enumerate(self.services_cache): if service.token == cache_service.token: - self.cache.services[index] = service + self.services_cache[index] = service logger.info("Updated: %s", service) return # new entry - self.cache.services.append(service) + self.services_cache.append(service) logger.info("Updated(new): %s", service) def _update_app_service_txn(self, txn, service): @@ -160,7 +148,7 @@ class ApplicationServiceStore(SQLBaseStore): @defer.inlineCallbacks def get_app_services(self): yield self.cache_defer # make sure the cache is ready - defer.returnValue(self.cache.services) + defer.returnValue(self.services_cache) @defer.inlineCallbacks def get_app_service_by_token(self, token, from_cache=True): @@ -176,7 +164,7 @@ class ApplicationServiceStore(SQLBaseStore): yield self.cache_defer # make sure the cache is ready if from_cache: - for service in self.cache.services: + for service in self.services_cache: if service.token == token: defer.returnValue(service) return @@ -235,7 +223,7 @@ class ApplicationServiceStore(SQLBaseStore): # TODO get last successful txn id f.e. service for service in services.values(): logger.info("Found application service: %s", service) - self.cache.services.append(ApplicationService( + self.services_cache.append(ApplicationService( token=service["token"], url=service["url"], namespaces=service["namespaces"], -- cgit 1.5.1 From e76d485e29498fce7412423e7a5b6ac6bc287ec3 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Mon, 23 Feb 2015 15:41:54 +0000 Subject: Allow @cached-wrapped functions to have a prefill method for setting entries --- synapse/storage/_base.py | 23 +++++++++++++++-------- tests/storage/test__base.py | 14 ++++++++++++++ 2 files changed, 29 insertions(+), 8 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 78ba5f25ea..4b1ec687c9 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -48,24 +48,30 @@ def cached(max_entries=1000): The wrapped function has an additional member, a callable called "invalidate". This can be used to remove individual entries from the cache. + + The wrapped function has another additional callable, called "prefill", + which can be used to insert values into the cache specifically, without + calling the calculation function. """ def wrap(orig): cache = {} - @defer.inlineCallbacks - def wrapped(self, key): - if key in cache: - defer.returnValue(cache[key]) - - ret = yield orig(self, key) - + def prefill(key, value): while len(cache) > max_entries: # TODO(paul): This feels too biased. However, a random index # would be a bit inefficient, walking the list of keys just # to ignore most of them? 
del cache[cache.keys()[0]] - cache[key] = ret; + cache[key] = value + + @defer.inlineCallbacks + def wrapped(self, key): + if key in cache: + defer.returnValue(cache[key]) + + ret = yield orig(self, key) + prefill(key, ret) defer.returnValue(ret) def invalidate(key): @@ -73,6 +79,7 @@ def cached(max_entries=1000): del cache[key] wrapped.invalidate = invalidate + wrapped.prefill = prefill return wrapped return wrap diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 057f798640..fb306cb784 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -87,3 +87,17 @@ class CacheDecoratorTestCase(unittest.TestCase): self.assertTrue(callcount[0] >= 14, msg="Expected callcount >= 14, got %d" % (callcount[0])) + + @defer.inlineCallbacks + def test_prefill(self): + callcount = [0] + + @cached() + def func(self, key): + callcount[0] += 1 + return key + + func.prefill("foo", 123) + + self.assertEquals((yield func(self, "foo")), 123) + self.assertEquals(callcount[0], 0) -- cgit 1.5.1 From 357fba2c24067796ce89f25636a2541bc9a10752 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Mon, 23 Feb 2015 15:57:41 +0000 Subject: RoomMemberStore no longer needs a _user_rooms_cache member --- synapse/storage/roommember.py | 5 ----- 1 file changed, 5 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 33a832483e..58aa376c20 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -35,11 +35,6 @@ RoomsForUser = namedtuple( class RoomMemberStore(SQLBaseStore): - def __init__(self, *args, **kw): - super(RoomMemberStore, self).__init__(*args, **kw) - - self._user_rooms_cache = {} - def _store_room_member_txn(self, txn, event): """Store a room member in the database. """ -- cgit 1.5.1 From 044d813ef7bb67e7b680087d82f931e4c780218f Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Mon, 23 Feb 2015 16:04:40 +0000 Subject: Use the @cached decorator to implement the destination_retry_timings cache --- synapse/storage/transactions.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index e06ef35690..6cac8d01ac 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import SQLBaseStore, Table +from ._base import SQLBaseStore, Table, cached from collections import namedtuple @@ -28,10 +28,6 @@ class TransactionStore(SQLBaseStore): """A collection of queries for handling PDUs. """ - # a write-through cache of DestinationsTable.EntryType indexed by - # destination string - destination_retry_cache = {} - def get_received_txn_response(self, transaction_id, origin): """For an incoming transaction from a given origin, check if we have already responded to it. If so, return the response code and response @@ -211,6 +207,7 @@ class TransactionStore(SQLBaseStore): return ReceivedTransactionsTable.decode_results(txn.fetchall()) + @cached() def get_destination_retry_timings(self, destination): """Gets the current retry timings (if any) for a given destination. 
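# Aside (not part of the patch): the write path below seeds the cache via
# .prefill() instead of invalidating it, because after a write we already
# know what a subsequent read would return. A compressed sketch of the
# same pattern, assuming the @cached() decorator from _base.py above is
# in scope; all other names here are illustrative.
from twisted.internet import defer

class RetryStore(object):
    def __init__(self):
        self.db = {}

    @cached()
    def get_destination_retry_timings(self, destination):
        # stands in for the real database read
        return defer.succeed(self.db.get(destination))

    def set_destination_retry_timings(self, destination, entry):
        self.db[destination] = entry
        # write-through: seed the cache rather than invalidate it
        self.get_destination_retry_timings.prefill(destination, entry)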
@@ -221,9 +218,6 @@ class TransactionStore(SQLBaseStore): None if not retrying Otherwise a DestinationsTable.EntryType for the retry scheme """ - if destination in self.destination_retry_cache: - return defer.succeed(self.destination_retry_cache[destination]) - return self.runInteraction( "get_destination_retry_timings", self._get_destination_retry_timings, destination) @@ -250,7 +244,9 @@ class TransactionStore(SQLBaseStore): retry_interval (int) - how long until next retry in ms """ - self.destination_retry_cache[destination] = ( + # As this is the new value, we might as well prefill the cache + self.get_destination_retry_timings.prefill( + destination, DestinationsTable.EntryType( destination, retry_last_ts, -- cgit 1.5.1 From a09e59a69830ba99d65f54b2385c3cab341accb0 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Mon, 23 Feb 2015 16:55:57 +0000 Subject: Pull the _get_event_cache.setdefault() call out of the try block, as it doesn't need to be there and is confusing --- synapse/storage/_base.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 4b1ec687c9..84f222b3db 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -636,8 +636,9 @@ class SQLBaseStore(object): start_time = time.time() * 1000 update_counter = self._get_event_counters.update + cache = self._get_event_cache.setdefault(event_id, {}) + try: - cache = self._get_event_cache.setdefault(event_id, {}) # Separate cache entries for each way to invoke _get_event_txn return cache[(check_redacted, get_prev_content, allow_rejected)] except KeyError: -- cgit 1.5.1 From 74048bdd41108e0c98e034b6ebc5890fd3bcf92b Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 23 Feb 2015 18:17:43 +0000 Subject: Remove unused import --- synapse/storage/transactions.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 6cac8d01ac..0b8a3b7a07 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -17,8 +17,6 @@ from ._base import SQLBaseStore, Table, cached from collections import namedtuple -from twisted.internet import defer - import logging logger = logging.getLogger(__name__) -- cgit 1.5.1 From 27080698e7e9c7d8bb8dfe473f888abdfc48fee7 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 23 Feb 2015 18:19:13 +0000 Subject: Fix code style warning --- synapse/storage/_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 84f222b3db..42edb56c36 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -79,7 +79,7 @@ def cached(max_entries=1000): del cache[key] wrapped.invalidate = invalidate - wrapped.prefill = prefill + wrapped.prefill = prefill return wrapped return wrap -- cgit 1.5.1 From f53fcbce9789186c1c42fe2f93ba46e3d8720b1b Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Mon, 23 Feb 2015 18:29:26 +0000 Subject: Use cache.pop() instead of a separate membership test + del [] --- synapse/storage/_base.py | 3 +-- tests/storage/test__base.py | 7 +++++++ 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 42edb56c36..da698cb3b8 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -75,8 +75,7 @@ def cached(max_entries=1000): 
defer.returnValue(ret) def invalidate(key): - if key in cache: - del cache[key] + cache.pop(key, None) wrapped.invalidate = invalidate wrapped.prefill = prefill diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index fb306cb784..55d22f665a 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -66,6 +66,13 @@ class CacheDecoratorTestCase(unittest.TestCase): self.assertEquals(callcount[0], 2) + def test_invalidate_missing(self): + @cached() + def func(self, key): + return key + + func.invalidate("what") + @defer.inlineCallbacks def test_max_entries(self): callcount = [0] -- cgit 1.5.1 From 9640510de206d673e4c0c9cbd1cef219bcd488b2 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Mon, 23 Feb 2015 18:41:58 +0000 Subject: Use OrderedDict for @cached backing store, so we can evict the oldest key unbiased --- synapse/storage/_base.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index da698cb3b8..c98dd36aed 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -23,7 +23,7 @@ from synapse.util.lrucache import LruCache from twisted.internet import defer -import collections +from collections import namedtuple, OrderedDict import simplejson as json import sys import time @@ -54,14 +54,11 @@ def cached(max_entries=1000): calling the calculation function. """ def wrap(orig): - cache = {} + cache = OrderedDict() def prefill(key, value): while len(cache) > max_entries: - # TODO(paul): This feels too biased. However, a random index - # would be a bit inefficient, walking the list of keys just - # to ignore most of them? - del cache[cache.keys()[0]] + cache.popitem(last=False) cache[key] = value @@ -836,7 +833,7 @@ class JoinHelper(object): for table in self.tables: res += [f for f in table.fields if f not in res] - self.EntryType = collections.namedtuple("JoinHelperEntry", res) + self.EntryType = namedtuple("JoinHelperEntry", res) def get_fields(self, **prefixes): """Get a string representing a list of fields for use in SELECT -- cgit 1.5.1 From 2d20466f9a1349c97d5a3822eb4ee64f19bbdf27 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Wed, 25 Feb 2015 15:00:59 +0000 Subject: Add stub functions and work out execution flow to implement AS event stream polling. 
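A minimal illustration of the OrderedDict change above: popitem(last=False) always removes the oldest insertion, so eviction is now first-in-first-out rather than whatever arbitrary key a plain dict happens to yield first.

from collections import OrderedDict

cache = OrderedDict()
for key in ("a", "b", "c"):
    cache[key] = key.upper()  # insertion order is remembered

cache.popitem(last=False)     # deterministically evicts the oldest entry, "a"
assert list(cache) == ["b", "c"]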
--- synapse/handlers/events.py | 3 --- synapse/handlers/room.py | 34 +++++++++++++++++++++++++--------- synapse/storage/appservice.py | 19 +++++++++++++++++++ synapse/storage/stream.py | 21 +++++++++++++++++++++ 4 files changed, 65 insertions(+), 12 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 025e7e7e62..8d5f5c8499 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -69,9 +69,6 @@ class EventStreamHandler(BaseHandler): ) self._streams_per_user[auth_user] += 1 - if pagin_config.from_token is None: - pagin_config.from_token = None - rm_handler = self.hs.get_handlers().room_member_handler room_ids = yield rm_handler.get_rooms_for_user(auth_user) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 914742d913..a8b0c95636 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -510,9 +510,16 @@ class RoomMemberHandler(BaseHandler): def get_rooms_for_user(self, user, membership_list=[Membership.JOIN]): """Returns a list of roomids that the user has any of the given membership states in.""" - rooms = yield self.store.get_rooms_for_user_where_membership_is( - user_id=user.to_string(), membership_list=membership_list + + app_service = yield self.store.get_app_service_by_user_id( + user.to_string() ) + if app_service: + rooms = yield self.store.get_app_service_rooms(app_service) + else: + rooms = yield self.store.get_rooms_for_user_where_membership_is( + user_id=user.to_string(), membership_list=membership_list + ) # For some reason the list of events contains duplicates # TODO(paul): work out why because I really don't think it should @@ -559,13 +566,22 @@ class RoomEventSource(object): to_key = yield self.get_current_key() - events, end_key = yield self.store.get_room_events_stream( - user_id=user.to_string(), - from_key=from_key, - to_key=to_key, - room_id=None, - limit=limit, - ) + app_service = self.store.get_app_service_by_user_id(user.to_string()) + if app_service: + events, end_key = yield self.store.get_appservice_room_stream( + service=app_service, + from_key=from_key, + to_key=to_key, + limit=limit, + ) + else: + events, end_key = yield self.store.get_room_events_stream( + user_id=user.to_string(), + from_key=from_key, + to_key=to_key, + room_id=None, + limit=limit, + ) defer.returnValue((events, end_key)) diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index dc3666efd4..435ccfd6fc 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -17,6 +17,7 @@ from twisted.internet import defer from synapse.api.errors import StoreError from synapse.appservice import ApplicationService +from synapse.storage.roommember import RoomsForUser from ._base import SQLBaseStore @@ -150,6 +151,16 @@ class ApplicationServiceStore(SQLBaseStore): yield self.cache_defer # make sure the cache is ready defer.returnValue(self.services_cache) + @defer.inlineCallbacks + def get_app_service_by_user_id(self, user_id): + yield self.cache_defer # make sure the cache is ready + + for service in self.services_cache: + if service.sender == user_id: + defer.returnValue(service) + return + defer.returnValue(None) + @defer.inlineCallbacks def get_app_service_by_token(self, token, from_cache=True): """Get the application service with the given token. @@ -173,6 +184,14 @@ class ApplicationServiceStore(SQLBaseStore): # TODO: The from_cache=False impl # TODO: This should be JOINed with the application_services_regex table. 
+ @defer.inlineCallbacks + def get_app_service_rooms(self, service): + logger.info("get_app_service_rooms -> %s", service) + + # TODO stub + yield self.cache_defer + defer.returnValue([RoomsForUser("!foo:bar", service.sender, "join")]) + @defer.inlineCallbacks def _populate_cache(self): """Populates the ApplicationServiceCache from the database.""" diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 3ccb6f8a61..aa3c9f8c9c 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -127,6 +127,27 @@ class _StreamToken(namedtuple("_StreamToken", "topological stream")): class StreamStore(SQLBaseStore): + + def get_appservice_room_stream(self, service, from_key, to_key, limit=0): + # NB this lives here instead of appservice.py so we can reuse the + # 'private' StreamToken class in this file. + logger.info("get_appservice_room_stream -> %s", service) + + if limit: + limit = max(limit, MAX_STREAM_SIZE) + else: + limit = MAX_STREAM_SIZE + + # From and to keys should be integers from ordering. + from_id = _StreamToken.parse_stream_token(from_key) + to_id = _StreamToken.parse_stream_token(to_key) + + if from_key == to_key: + return defer.succeed(([], to_key)) + + # TODO stub + return defer.succeed(([], to_key)) + @log_function def get_room_events_stream(self, user_id, from_key, to_key, room_id, limit=0, with_feedback=False): -- cgit 1.5.1 From 2b8ca84296b228b7cef09244605e4f2760349538 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Wed, 25 Feb 2015 17:15:25 +0000 Subject: Add support for extracting matching room_ids and room_aliases for a given AS. --- synapse/storage/appservice.py | 50 +++++++++++++++++++++++++++++++++++++++++-- synapse/storage/directory.py | 23 ++++++++++++++++++++ synapse/storage/room.py | 11 ++++++++++ 3 files changed, 82 insertions(+), 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 435ccfd6fc..c8f0ce44f4 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -153,6 +153,19 @@ class ApplicationServiceStore(SQLBaseStore): @defer.inlineCallbacks def get_app_service_by_user_id(self, user_id): + """Retrieve an application service from their user ID. + + All application services have associated with them a particular user ID. + There is no distinguishing feature on the user ID which indicates it + represents an application service. This function allows you to map from + a user ID to an application service. + + Args: + user_id(str): The user ID to see if it is an application service. + Returns: + synapse.appservice.ApplicationService or None. + """ + yield self.cache_defer # make sure the cache is ready for service in self.services_cache: @@ -163,7 +176,7 @@ class ApplicationServiceStore(SQLBaseStore): @defer.inlineCallbacks def get_app_service_by_token(self, token, from_cache=True): - """Get the application service with the given token. + """Get the application service with the given appservice token. Args: token (str): The application service token. @@ -186,10 +199,43 @@ class ApplicationServiceStore(SQLBaseStore): @defer.inlineCallbacks def get_app_service_rooms(self, service): - logger.info("get_app_service_rooms -> %s", service) + """Get a list of RoomsForUser for this application service. + + Application services may be "interested" in lots of rooms depending on + the room ID, the room aliases, or the members in the room. 
This function + takes all of these into account and returns a list of RoomsForUser which + represent the entire list of room IDs that this application service + wants to know about. + + Args: + service: The application service to get a room list for. + Returns: + A list of RoomsForUser. + """ + # FIXME: This is assuming that this store has methods from + # RoomStore, DirectoryStore, which is a bad assumption to + # make as it makes testing trickier and coupling less obvious. + + # get all rooms matching the room ID regex. + room_entries = yield self.get_all_rooms() # RoomEntry list + matching_room_id_list = [ + r.room_id for r in room_entries if + service.is_interested_in_room(r.room_id) + ] + + # resolve room IDs for matching room alias regex. + room_alias_mappings = yield self.get_all_associations() + matching_alias_list = [ + r.room_id for r in room_alias_mappings if + service.is_interested_in_alias(r.room_alias) + ] + + # get all rooms for every user for this AS. # TODO stub yield self.cache_defer + + defer.returnValue([RoomsForUser("!foo:bar", service.sender, "join")]) @defer.inlineCallbacks diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index 68b7d59693..e13b336934 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -134,6 +134,29 @@ class DirectoryStore(SQLBaseStore): return room_id + @defer.inlineCallbacks + def get_all_associations(self): + """Retrieve the entire list of room alias -> room ID pairings. + + Returns: + A list of RoomAliasMappings. + """ + results = self._simple_select_list( + "room_aliases", + None, + ["room_alias", "room_id"] + ) + # TODO(kegan): It feels wrong to be specifying no servers here, but + # equally this function isn't required to obtain all servers so + # retrieving them "just for the sake of it" also seems wrong, but we + # want to conform to passing Objects around and not dicts.. + return [ + RoomAliasMapping( + room_id=r["room_id"], room_alias=r["room_alias"], servers="" + ) for r in results + ] + + def get_aliases_for_room(self, room_id): return self._simple_select_onecol( "room_aliases", diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 750b17a45f..3a64693404 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -71,6 +71,17 @@ class RoomStore(SQLBaseStore): RoomsTable.decode_single_result, query, room_id, ) + def get_all_rooms(self): + """Retrieve all the rooms. + + Returns: + A list of namedtuples containing the room information. + """ + query = RoomsTable.select_statement() + return self._execute( + RoomsTable.decode_results, query, + ) + @defer.inlineCallbacks def get_rooms(self, is_public): """Retrieve a list of all public rooms. -- cgit 1.5.1 From 2c79c4dc7f638f1cb823903a2f8bb1005fda4a2c Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Wed, 25 Feb 2015 17:37:14 +0000 Subject: Fix alias query. --- synapse/storage/directory.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index e13b336934..70c8c8ccd3 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -141,20 +141,19 @@ class DirectoryStore(SQLBaseStore): Returns: A list of RoomAliasMappings. 
""" - results = self._simple_select_list( - "room_aliases", - None, - ["room_alias", "room_id"] + results = yield self._execute_and_decode( + "SELECT room_id, room_alias FROM room_aliases" ) + # TODO(kegan): It feels wrong to be specifying no servers here, but # equally this function isn't required to obtain all servers so # retrieving them "just for the sake of it" also seems wrong, but we # want to conform to passing Objects around and not dicts.. - return [ + defer.returnValue([ RoomAliasMapping( room_id=r["room_id"], room_alias=r["room_alias"], servers="" ) for r in results - ] + ]) def get_aliases_for_room(self, room_id): -- cgit 1.5.1 From 978ce87c86e60fd49f078d5bea79715abea6d236 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Wed, 25 Feb 2015 17:37:48 +0000 Subject: Comment unused variables. --- synapse/storage/stream.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index aa3c9f8c9c..3c8f3320f1 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -139,8 +139,8 @@ class StreamStore(SQLBaseStore): limit = MAX_STREAM_SIZE # From and to keys should be integers from ordering. - from_id = _StreamToken.parse_stream_token(from_key) - to_id = _StreamToken.parse_stream_token(to_key) + # from_id = _StreamToken.parse_stream_token(from_key) + # to_id = _StreamToken.parse_stream_token(to_key) if from_key == to_key: return defer.succeed(([], to_key)) -- cgit 1.5.1 From 29267cf9d7fbacdfcccaaef9160657f24b9aca14 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Wed, 25 Feb 2015 17:42:28 +0000 Subject: PEP8 and pyflakes --- synapse/storage/appservice.py | 8 +++----- synapse/storage/directory.py | 1 - 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index c8f0ce44f4..017b6d1e86 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -229,12 +229,10 @@ class ApplicationServiceStore(SQLBaseStore): r.room_id for r in room_alias_mappings if service.is_interested_in_alias(r.room_alias) ] + logging.debug(matching_alias_list) + logging.debug(matching_room_id_list) - # get all rooms for every user for this AS. - - # TODO stub - yield self.cache_defer - + # TODO get all rooms for every user for this AS. 
defer.returnValue([RoomsForUser("!foo:bar", service.sender, "join")]) diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index 70c8c8ccd3..e391239a3c 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -155,7 +155,6 @@ class DirectoryStore(SQLBaseStore): ) for r in results ]) - def get_aliases_for_room(self, room_id): return self._simple_select_onecol( "room_aliases", -- cgit 1.5.1 From 94fa334b01d232bed96fd4ee05fc44d00330c2b9 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 25 Feb 2015 19:17:07 +0000 Subject: Add enable/disable overlay for push rules (REST API not yet hooked up) --- synapse/push/__init__.py | 4 ++++ synapse/push/baserules.py | 3 +++ synapse/storage/push_rule.py | 20 ++++++++++++++++++++ synapse/storage/schema/delta/next_pushrules2.sql | 9 +++++++++ synapse/storage/schema/pusher.sql | 10 ++++++++++ 5 files changed, 46 insertions(+) create mode 100644 synapse/storage/schema/delta/next_pushrules2.sql (limited to 'synapse/storage') diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 0fb3e4f7f3..40fae91ab5 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -82,6 +82,8 @@ class Pusher(object): r['conditions'] = json.loads(r['conditions']) r['actions'] = json.loads(r['actions']) + enabled_map = yield self.store.get_push_rules_enabled_for_user_name(self.user_name) + user = UserID.from_string(self.user_name) rules = baserules.list_with_base_rules(rawrules, user) @@ -107,6 +109,8 @@ class Pusher(object): room_member_count += 1 for r in rules: + if r['rule_id'] in enabled_map and not enabled_map[r['rule_id']]: + continue matches = True conditions = r['conditions'] diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 162d265f66..ba9a181b56 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -45,6 +45,7 @@ def make_base_rules(user, kind): def make_base_content_rules(user): return [ { + 'rule_id': '.m.rule.contains_user_name', 'conditions': [ { 'kind': 'event_match', @@ -66,6 +67,7 @@ def make_base_content_rules(user): def make_base_override_rules(): return [ { + 'rule_id': '.m.rule.contains_display_name', 'conditions': [ { 'kind': 'contains_display_name' @@ -80,6 +82,7 @@ def make_base_override_rules(): ] }, { + 'rule_id': '.m.rule.room_two_members', 'conditions': [ { 'kind': 'room_member_count', diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index ae46b39cc1..3890c7aab2 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -45,6 +45,17 @@ class PushRuleStore(SQLBaseStore): defer.returnValue(dicts) + @defer.inlineCallbacks + def get_push_rules_enabled_for_user_name(self, user_name): + results = yield self._simple_select_list( + PushRuleEnableTable.table_name, + {'user_name': user_name}, + PushRuleEnableTable.fields + ) + defer.returnValue( + {r['rule_id']: False if r['enabled'] == 0 else True for r in results} + ) + @defer.inlineCallbacks def add_push_rule(self, before, after, **kwargs): vals = copy.copy(kwargs) @@ -216,3 +227,12 @@ class PushRuleTable(Table): ] EntryType = collections.namedtuple("PushRuleEntry", fields) + +class PushRuleEnableTable(Table): + table_name = "push_rules_enable" + + fields = [ + "user_name", + "rule_id", + "enabled" + ] \ No newline at end of file diff --git a/synapse/storage/schema/delta/next_pushrules2.sql b/synapse/storage/schema/delta/next_pushrules2.sql new file mode 100644 index 0000000000..0212726448 --- /dev/null +++ 
b/synapse/storage/schema/delta/next_pushrules2.sql @@ -0,0 +1,9 @@ +CREATE TABLE IF NOT EXISTS push_rules_enable ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + rule_id TEXT NOT NULL, + enabled TINYINT, + UNIQUE(user_name, rule_id) +); + +CREATE INDEX IF NOT EXISTS push_rules_enable_user_name on push_rules_enable (user_name); diff --git a/synapse/storage/schema/pusher.sql b/synapse/storage/schema/pusher.sql index 3735b11547..31bf1cb685 100644 --- a/synapse/storage/schema/pusher.sql +++ b/synapse/storage/schema/pusher.sql @@ -44,3 +44,13 @@ CREATE TABLE IF NOT EXISTS push_rules ( ); CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name); + +CREATE TABLE IF NOT EXISTS push_rules_enable ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + rule_id TEXT NOT NULL, + enabled TINYINT, + UNIQUE(user_name, rule_id) +); + +CREATE INDEX IF NOT EXISTS push_rules_enable_user_name on push_rules_enable (user_name); -- cgit 1.5.1 From 944003021bb9f6aa232d1436761c1c07f0273241 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 26 Feb 2015 13:43:05 +0000 Subject: whitespace --- synapse/storage/push_rule.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 3890c7aab2..cd8d0f6dde 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -228,6 +228,7 @@ class PushRuleTable(Table): EntryType = collections.namedtuple("PushRuleEntry", fields) + class PushRuleEnableTable(Table): table_name = "push_rules_enable" @@ -235,4 +236,4 @@ class PushRuleEnableTable(Table): "user_name", "rule_id", "enabled" - ] \ No newline at end of file + ] -- cgit 1.5.1 From 92478e96d6f6992146102599ca96b8dcacbf3895 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Thu, 26 Feb 2015 14:35:28 +0000 Subject: Finish impl to extract all room IDs an AS may be interested in when polling the event stream. --- synapse/storage/appservice.py | 35 +++++++++++++++++++++++++++++------ synapse/storage/registration.py | 7 +++++++ 2 files changed, 36 insertions(+), 6 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 017b6d1e86..d0632d55d1 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -213,8 +213,9 @@ class ApplicationServiceStore(SQLBaseStore): A list of RoomsForUser. """ # FIXME: This is assuming that this store has methods from - # RoomStore, DirectoryStore, which is a bad assumption to - # make as it makes testing trickier and coupling less obvious. + # RoomStore, DirectoryStore, RegistrationStore, RoomMemberStore which is + # a bad assumption to make as it makes testing trickier and coupling + # less obvious. # get all rooms matching the room ID regex. room_entries = yield self.get_all_rooms() # RoomEntry list @@ -229,12 +230,34 @@ class ApplicationServiceStore(SQLBaseStore): r.room_id for r in room_alias_mappings if service.is_interested_in_alias(r.room_alias) ] - logging.debug(matching_alias_list) - logging.debug(matching_room_id_list) + room_ids_matching_alias_or_id = set( + matching_room_id_list + matching_alias_list + ) - # TODO get all rooms for every user for this AS. + # get all rooms for every user for this AS. This is scoped to users on + # this HS only. 
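# Taken together, the target list is the union of three sets: rooms whose
# IDs match, rooms reachable via a matching alias, and rooms joined by a
# matching user. A standalone sketch of that shape (plain data structures
# stand in for the store queries; the function name is invented):
def app_service_room_ids(service, all_room_ids, alias_to_room, user_rooms):
    by_id = set(r for r in all_room_ids if service.is_interested_in_room(r))
    by_alias = set(
        room_id for alias, room_id in alias_to_room.items()
        if service.is_interested_in_alias(alias)
    )
    by_user = set()
    for user_id, joined in user_rooms.items():  # user_id -> set of room IDs
        if service.is_interested_in_user(user_id):
            by_user |= joined
    return by_id | by_alias | by_user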
+ user_list = yield self.get_all_users() + user_list = [ + u["name"] for u in user_list if + service.is_interested_in_user(u["name"]) + ] + rooms_for_user_matching_user_id = [] # RoomsForUser list + for user_id in user_list: + rooms_for_user = yield self.get_rooms_for_user(user_id) + rooms_for_user_matching_user_id += rooms_for_user + rooms_for_user_matching_user_id = set(rooms_for_user_matching_user_id) + + # make RoomsForUser tuples for room ids and aliases which are not in the + # main rooms_for_user_list - e.g. they are rooms which do not have AS + # registered users in it. + known_room_ids = [r.room_id for r in rooms_for_user_matching_user_id] + missing_rooms_for_user = [ + RoomsForUser(r, service.sender, "join") for r in + room_ids_matching_alias_or_id if r not in known_room_ids + ] + rooms_for_user_matching_user_id |= set(missing_rooms_for_user) - defer.returnValue([RoomsForUser("!foo:bar", service.sender, "join")]) + defer.returnValue(rooms_for_user_matching_user_id) @defer.inlineCallbacks def _populate_cache(self): diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 029b07cc66..7aff3dbd33 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -92,6 +92,13 @@ class RegistrationStore(SQLBaseStore): query, user_id ) + def get_all_users(self): + query = ("SELECT users.name FROM users") + return self._execute( + self.cursor_to_dict, + query + ) + def get_user_by_token(self, token): """Get a user from the given access token. -- cgit 1.5.1 From dcec7175dc30754603618351b59bc72ff41d305b Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Thu, 26 Feb 2015 16:23:01 +0000 Subject: Finish impl to get new events for AS. ASes should now be able to poll /events --- synapse/handlers/room.py | 4 ++- synapse/storage/stream.py | 62 +++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 58 insertions(+), 8 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a8b0c95636..80f7ee3f12 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -566,7 +566,9 @@ class RoomEventSource(object): to_key = yield self.get_current_key() - app_service = self.store.get_app_service_by_user_id(user.to_string()) + app_service = yield self.store.get_app_service_by_user_id( + user.to_string() + ) if app_service: events, end_key = yield self.store.get_appservice_room_stream( service=app_service, diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 3c8f3320f1..6946e9fe70 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -128,25 +128,73 @@ class _StreamToken(namedtuple("_StreamToken", "topological stream")): class StreamStore(SQLBaseStore): + @defer.inlineCallbacks def get_appservice_room_stream(self, service, from_key, to_key, limit=0): # NB this lives here instead of appservice.py so we can reuse the # 'private' StreamToken class in this file. - logger.info("get_appservice_room_stream -> %s", service) - if limit: limit = max(limit, MAX_STREAM_SIZE) else: limit = MAX_STREAM_SIZE # From and to keys should be integers from ordering. 
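# Those keys are "s<integer>" strings wrapping a stream ordering. A
# minimal sketch of the parse/format round-trip (the real _StreamToken is
# a namedtuple that also carries a topological ordering):
def parse_stream_token(key):
    if not key or key[0] != "s":
        raise ValueError("Invalid stream token %r" % (key,))
    return int(key[1:])

def format_stream_token(stream_id):
    return "s%d" % (stream_id,)

assert parse_stream_token(format_stream_token(42)) == 42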
- # from_id = _StreamToken.parse_stream_token(from_key) - # to_id = _StreamToken.parse_stream_token(to_key) + from_id = _StreamToken.parse_stream_token(from_key) + to_id = _StreamToken.parse_stream_token(to_key) if from_key == to_key: - return defer.succeed(([], to_key)) + defer.returnValue(([], to_key)) + return + + # Logic: + # - We want ALL events which match the AS room_id regex + # - We want ALL events which match the rooms represented by the AS + # room_alias regex + # - We want ALL events for rooms that AS users have joined. + # This is currently supported via get_app_service_rooms (which is used + # for the Notifier listener rooms). We can't reasonably make a SQL + # query for these room IDs, so we'll pull all the events between from/to + # and filter in python. + rooms_for_as = yield self.get_app_service_rooms(service) + room_ids_for_as = [r.room_id for r in rooms_for_as] + + # select all the events between from/to with a sensible limit + sql = ( + "SELECT e.event_id, e.room_id, e.stream_ordering FROM events AS e " + "WHERE e.stream_ordering > ? AND e.stream_ordering <= ? " + "ORDER BY stream_ordering ASC LIMIT %(limit)d " + ) % { + "limit": limit + } + + def f(txn): + txn.execute(sql, (from_id.stream, to_id.stream,)) + + rows = self.cursor_to_dict(txn) + + ret = self._get_events_txn( + txn, + # apply the filter on the room id list + [ + r["event_id"] for r in rows + if r["room_id"] in room_ids_for_as + ], + get_prev_content=True + ) + + self._set_before_and_after(ret, rows) + + if rows: + key = "s%d" % max([r["stream_ordering"] for r in rows]) + + else: + # Assume we didn't get anything because there was nothing to + # get. + key = to_key + + return ret, key - # TODO stub - return defer.succeed(([], to_key)) + results = yield self.runInteraction("get_appservice_room_stream", f) + defer.returnValue(results) @log_function def get_room_events_stream(self, user_id, from_key, to_key, room_id, -- cgit 1.5.1 From f0995436e7951448d8be3c372f4002845a111a7d Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Thu, 26 Feb 2015 17:21:17 +0000 Subject: Check for membership invite events correctly. --- synapse/storage/stream.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 6946e9fe70..5d01ecf200 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -36,12 +36,14 @@ what sort order was used: from twisted.internet import defer from ._base import SQLBaseStore +from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError from synapse.util.logutils import log_function from collections import namedtuple import logging +import simplejson logger = logging.getLogger(__name__) @@ -159,13 +161,30 @@ class StreamStore(SQLBaseStore): # select all the events between from/to with a sensible limit sql = ( - "SELECT e.event_id, e.room_id, e.stream_ordering FROM events AS e " + "SELECT e.event_id, e.room_id, e.type, e.unrecognized_keys, " + "e.stream_ordering FROM events AS e " "WHERE e.stream_ordering > ? AND e.stream_ordering <= ? " "ORDER BY stream_ordering ASC LIMIT %(limit)d " ) % { "limit": limit } + + def app_service_interested(row): + if row["room_id"] in room_ids_for_as: + return True + + if row["type"] == EventTypes.Member: + # load up the content to inspect if some user the AS is + # interested in was invited to a room. 
We'll be passing this + # through _get_events_txn later, so ignore the fact that this + # may be a redacted event. + event_content = simplejson.loads(row["unrecognized_keys"]) + if (service.is_interested_in_user( + event_content.get("state_key"))): + return True + return False + def f(txn): txn.execute(sql, (from_id.stream, to_id.stream,)) @@ -176,7 +195,7 @@ class StreamStore(SQLBaseStore): # apply the filter on the room id list [ r["event_id"] for r in rows - if r["room_id"] in room_ids_for_as + if app_service_interested(r) ], get_prev_content=True ) -- cgit 1.5.1 From 19590881568f5aafd1b1d0b12cd6c10954ee60b1 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 26 Feb 2015 18:07:44 +0000 Subject: Add API for getting/setting enabled-ness of push rules. --- synapse/push/baserules.py | 8 +++--- synapse/rest/client/v1/push_rule.py | 54 +++++++++++++++++++++++++++++++++---- synapse/storage/push_rule.py | 24 +++++++++++++++++ 3 files changed, 77 insertions(+), 9 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index ba9a181b56..f4d2be11f6 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -37,7 +37,7 @@ def make_base_rules(user, kind): for r in rules: r['priority_class'] = PRIORITY_CLASS_MAP[kind] - r['default'] = True + r['default'] = True # Deprecated, left for backwards compat return rules @@ -45,7 +45,7 @@ def make_base_rules(user, kind): def make_base_content_rules(user): return [ { - 'rule_id': '.m.rule.contains_user_name', + 'rule_id': 'global/content/.m.rule.contains_user_name', 'conditions': [ { 'kind': 'event_match', @@ -67,7 +67,7 @@ def make_base_content_rules(user): def make_base_override_rules(): return [ { - 'rule_id': '.m.rule.contains_display_name', + 'rule_id': 'global/override/.m.rule.contains_display_name', 'conditions': [ { 'kind': 'contains_display_name' @@ -82,7 +82,7 @@ def make_base_override_rules(): ] }, { - 'rule_id': '.m.rule.room_two_members', + 'rule_id': 'global/override/.m.rule.room_two_members', 'conditions': [ { 'kind': 'room_member_count', diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 73ba0494e6..c6133a8688 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -50,6 +50,10 @@ class PushRuleRestServlet(ClientV1RestServlet): content = _parse_json(request) + if 'attr' in spec: + self.set_rule_attr(user.to_string(), spec, content) + defer.returnValue((200, {})) + try: (conditions, actions) = _rule_tuple_from_request_object( spec['template'], @@ -124,6 +128,9 @@ class PushRuleRestServlet(ClientV1RestServlet): rules['global'] = _add_empty_priority_class_arrays(rules['global']) + enabled_map = yield self.hs.get_datastore().\ + get_push_rules_enabled_for_user_name(user.to_string()) + for r in ruleslist: rulearray = None @@ -149,6 +156,9 @@ class PushRuleRestServlet(ClientV1RestServlet): template_rule = _rule_to_template(r) if template_rule: + template_rule['enabled'] = True + if r['rule_id'] in enabled_map: + template_rule['enabled'] = enabled_map[r['rule_id']] rulearray.append(template_rule) path = request.postpath[1:] @@ -189,6 +199,24 @@ class PushRuleRestServlet(ClientV1RestServlet): def on_OPTIONS(self, _): return 200, {} + def set_rule_attr(self, user_name, spec, val): + if spec['attr'] == 'enabled': + if not isinstance(val, bool): + raise SynapseError(400, "Value for 'enabled' must be boolean") + namespaced_rule_id = _namespaced_rule_id_from_spec(spec) + 
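# (A note on the overlay semantics used throughout this commit: the
# enabled map is keyed by namespaced rule ID, and a rule missing from
# the map defaults to enabled -- i.e. a rule fires iff
# enabled_map.get(rule_id, True). The base rule definitions themselves
# are never rewritten.)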
self.hs.get_datastore().set_push_rule_enabled( + user_name, namespaced_rule_id, val + ) + else: + raise UnrecognizedRequestError() + + def get_rule_attr(self, user_name, namespaced_rule_id, attr): + if attr == 'enabled': + return self.hs.get_datastore().get_push_rule_enabled_by_user_name_rule_id( + user_name, namespaced_rule_id + ) + else: + raise UnrecognizedRequestError() def _rule_spec_from_path(path): if len(path) < 2: @@ -226,6 +254,12 @@ def _rule_spec_from_path(path): } if device: spec['profile_tag'] = device + + path = path[1:] + + if len(path) > 0 and len(path[0]) > 0: + spec['attr'] = path[0] + return spec @@ -319,10 +353,23 @@ def _filter_ruleset_with_path(ruleset, path): if path[0] == '': return ruleset[template_kind] rule_id = path[0] + + the_rule = None for r in ruleset[template_kind]: if r['rule_id'] == rule_id: - return r - raise NotFoundError + the_rule = r + if the_rule is None: + raise NotFoundError + + path = path[1:] + if len(path) == 0: + return the_rule + + attr = path[0] + if attr in the_rule: + return the_rule[attr] + else: + raise UnrecognizedRequestError() def _priority_class_from_spec(spec): @@ -399,9 +446,6 @@ class InvalidRuleException(Exception): def _parse_json(request): try: content = json.loads(request.content.read()) - if type(content) != dict: - raise SynapseError(400, "Content must be a JSON object.", - errcode=Codes.NOT_JSON) return content except ValueError: raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index cd8d0f6dde..6c38565773 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -56,6 +56,17 @@ class PushRuleStore(SQLBaseStore): {r['rule_id']: False if r['enabled'] == 0 else True for r in results} ) + @defer.inlineCallbacks + def get_push_rule_enabled_by_user_name_rule_id(self, user_name, rule_id): + results = yield self._simple_select_list( + PushRuleEnableTable.table_name, + {'user_name': user_name, 'rule_id': rule_id}, + ['enabled'] + ) + if len(results) == 0: + defer.returnValue(True) + defer.returnValue(results[0]) + @defer.inlineCallbacks def add_push_rule(self, before, after, **kwargs): vals = copy.copy(kwargs) @@ -204,6 +215,19 @@ class PushRuleStore(SQLBaseStore): {'user_name': user_name, 'rule_id': rule_id} ) + @defer.inlineCallbacks + def set_push_rule_enabled(self, user_name, rule_id, enabled): + if enabled: + yield self._simple_delete_one( + PushRuleEnableTable.table_name, + {'user_name': user_name, 'rule_id': rule_id} + ) + else: + yield self._simple_upsert( + PushRuleEnableTable.table_name, + {'user_name': user_name, 'rule_id': rule_id}, + {'enabled': False} + ) class RuleNotFoundException(Exception): pass -- cgit 1.5.1 From 806a6c886aaa695a7dbfd35f71b9cc59941b8366 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Fri, 27 Feb 2015 09:48:57 +0000 Subject: PEP8 --- synapse/storage/stream.py | 1 - 1 file changed, 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 5d01ecf200..09417bd147 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -169,7 +169,6 @@ class StreamStore(SQLBaseStore): "limit": limit } - def app_service_interested(row): if row["room_id"] in room_ids_for_as: return True -- cgit 1.5.1 From 16b90764adb8f2ab49b1853855d0fb739b79d245 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Fri, 27 Feb 2015 10:44:32 +0000 Subject: Convert expected format for AS regex to include exclusivity. 
Previously you just specified the regex as a string, now it expects a JSON object with a 'regex' key and an 'exclusive' boolean, as per spec. --- synapse/appservice/__init__.py | 26 ++++++++++++++++++------- synapse/rest/appservice/v1/register.py | 35 ++++++---------------------------- synapse/storage/appservice.py | 16 +++++++++++++--- 3 files changed, 38 insertions(+), 39 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 381b4cfc4a..b5e7ac16ba 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -46,19 +46,31 @@ class ApplicationService(object): def _check_namespaces(self, namespaces): # Sanity check that it is of the form: # { - # users: ["regex",...], - # aliases: ["regex",...], - # rooms: ["regex",...], + # users: [ {regex: "[A-z]+.*", exclusive: true}, ...], + # aliases: [ {regex: "[A-z]+.*", exclusive: true}, ...], + # rooms: [ {regex: "[A-z]+.*", exclusive: true}, ...], # } if not namespaces: return None for ns in ApplicationService.NS_LIST: + if ns not in namespaces: + namespaces[ns] = [] + continue + if type(namespaces[ns]) != list: - raise ValueError("Bad namespace value for '%s'", ns) - for regex in namespaces[ns]: - if not isinstance(regex, basestring): - raise ValueError("Expected string regex for ns '%s'", ns) + raise ValueError("Bad namespace value for '%s'" % ns) + for regex_obj in namespaces[ns]: + if not isinstance(regex_obj, dict): + raise ValueError("Expected dict regex for ns '%s'" % ns) + if not isinstance(regex_obj.get("exclusive"), bool): + raise ValueError( + "Expected bool for 'exclusive' in ns '%s'" % ns + ) + if not isinstance(regex_obj.get("regex"), basestring): + raise ValueError( + "Expected string for 'regex' in ns '%s'" % ns + ) return namespaces def _matches_regex(self, test_string, namespace_key): diff --git a/synapse/rest/appservice/v1/register.py b/synapse/rest/appservice/v1/register.py index 3bd0c1220c..a4f6159773 100644 --- a/synapse/rest/appservice/v1/register.py +++ b/synapse/rest/appservice/v1/register.py @@ -48,18 +48,12 @@ class RegisterRestServlet(AppServiceRestServlet): 400, "Missed required keys: as_token(str) / url(str)." ) - namespaces = { - "users": [], - "rooms": [], - "aliases": [] - } - - if "namespaces" in params: - self._parse_namespace(namespaces, params["namespaces"], "users") - self._parse_namespace(namespaces, params["namespaces"], "rooms") - self._parse_namespace(namespaces, params["namespaces"], "aliases") - - app_service = ApplicationService(as_token, as_url, namespaces) + try: + app_service = ApplicationService( + as_token, as_url, params["namespaces"] + ) + except ValueError as e: + raise SynapseError(400, e.message) app_service = yield self.handler.register(app_service) hs_token = app_service.hs_token @@ -68,23 +62,6 @@ class RegisterRestServlet(AppServiceRestServlet): "hs_token": hs_token })) - def _parse_namespace(self, target_ns, origin_ns, ns): - if ns not in target_ns or ns not in origin_ns: - return # nothing to parse / map through to. - - possible_regex_list = origin_ns[ns] - if not type(possible_regex_list) == list: - raise SynapseError(400, "Namespace %s isn't an array." % ns) - - for regex in possible_regex_list: - if not isinstance(regex, basestring): - raise SynapseError( - 400, "Regex '%s' isn't a string in namespace %s" % - (regex, ns) - ) - - target_ns[ns] = origin_ns[ns] - class UnregisterRestServlet(AppServiceRestServlet): """Handles AS registration with the home server. 
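# For reference, a namespaces object in the new format might look like
# this (a hedged, made-up example, not taken from a real registration):
#
#     {
#         "users":   [{"regex": "@irc_.*", "exclusive": True}],
#         "aliases": [{"regex": "#irc_.*", "exclusive": True}],
#         "rooms":   []
#     }
#
# and a standalone validator in the same spirit as _check_namespaces:
def check_namespaces(namespaces):
    for ns in ("users", "aliases", "rooms"):
        for regex_obj in namespaces.get(ns, []):
            if not isinstance(regex_obj, dict):
                raise ValueError("Expected dict regex for ns '%s'" % ns)
            if not isinstance(regex_obj.get("exclusive"), bool):
                raise ValueError("Expected bool for 'exclusive' in ns '%s'" % ns)
            if not isinstance(regex_obj.get("regex"), basestring):
                raise ValueError("Expected string for 'regex' in ns '%s'" % ns)
    return namespaces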
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index dc3666efd4..a3aa41e5fc 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +import simplejson +from simplejson import JSONDecodeError from twisted.internet import defer from synapse.api.errors import StoreError @@ -23,12 +25,18 @@ from ._base import SQLBaseStore logger = logging.getLogger(__name__) +def log_failure(failure): + logger.error("Failed to detect application services: %s", failure.value) + logger.error(failure.getTraceback()) + + class ApplicationServiceStore(SQLBaseStore): def __init__(self, hs): super(ApplicationServiceStore, self).__init__(hs) self.services_cache = [] self.cache_defer = self._populate_cache() + self.cache_defer.addErrback(log_failure) @defer.inlineCallbacks def unregister_app_service(self, token): @@ -128,11 +136,11 @@ class ApplicationServiceStore(SQLBaseStore): ) for (ns_int, ns_str) in enumerate(ApplicationService.NS_LIST): if ns_str in service.namespaces: - for regex in service.namespaces[ns_str]: + for regex_obj in service.namespaces[ns_str]: txn.execute( "INSERT INTO application_services_regex(" "as_id, namespace, regex) values(?,?,?)", - (as_id, ns_int, regex) + (as_id, ns_int, simplejson.dumps(regex_obj)) ) return True @@ -215,10 +223,12 @@ class ApplicationServiceStore(SQLBaseStore): try: services[as_token]["namespaces"][ ApplicationService.NS_LIST[ns_int]].append( - res["regex"] + simplejson.loads(res["regex"]) ) except IndexError: logger.error("Bad namespace enum '%s'. %s", ns_int, res) + except JSONDecodeError: + logger.error("Bad regex object '%s'", res["regex"]) # TODO get last successful txn id f.e. service for service in services.values(): -- cgit 1.5.1 From ebc48306662cf8719a0ea64e2955bf8d5e037a8e Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 2 Mar 2015 09:53:00 +0000 Subject: PR tweaks: set earlier on and use 'as json' for compat --- synapse/storage/appservice.py | 18 +++++++----------- synapse/storage/registration.py | 2 +- synapse/storage/stream.py | 8 ++++---- 3 files changed, 12 insertions(+), 16 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index d0632d55d1..c6ca2ab04e 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -219,20 +219,17 @@ class ApplicationServiceStore(SQLBaseStore): # get all rooms matching the room ID regex. room_entries = yield self.get_all_rooms() # RoomEntry list - matching_room_id_list = [ + matching_room_list = set([ r.room_id for r in room_entries if service.is_interested_in_room(r.room_id) - ] + ]) # resolve room IDs for matching room alias regex. room_alias_mappings = yield self.get_all_associations() - matching_alias_list = [ + matching_room_list |= set([ r.room_id for r in room_alias_mappings if service.is_interested_in_alias(r.room_alias) - ] - room_ids_matching_alias_or_id = set( - matching_room_id_list + matching_alias_list - ) + ]) # get all rooms for every user for this AS. This is scoped to users on # this HS only. 
@@ -241,11 +238,10 @@ class ApplicationServiceStore(SQLBaseStore): u["name"] for u in user_list if service.is_interested_in_user(u["name"]) ] - rooms_for_user_matching_user_id = [] # RoomsForUser list + rooms_for_user_matching_user_id = set() # RoomsForUser list for user_id in user_list: rooms_for_user = yield self.get_rooms_for_user(user_id) - rooms_for_user_matching_user_id += rooms_for_user - rooms_for_user_matching_user_id = set(rooms_for_user_matching_user_id) + rooms_for_user_matching_user_id |= set(rooms_for_user) # make RoomsForUser tuples for room ids and aliases which are not in the # main rooms_for_user_list - e.g. they are rooms which do not have AS @@ -253,7 +249,7 @@ class ApplicationServiceStore(SQLBaseStore): known_room_ids = [r.room_id for r in rooms_for_user_matching_user_id] missing_rooms_for_user = [ RoomsForUser(r, service.sender, "join") for r in - room_ids_matching_alias_or_id if r not in known_room_ids + matching_room_list if r not in known_room_ids ] rooms_for_user_matching_user_id |= set(missing_rooms_for_user) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 7aff3dbd33..9c92575c7f 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -93,7 +93,7 @@ class RegistrationStore(SQLBaseStore): ) def get_all_users(self): - query = ("SELECT users.name FROM users") + query = "SELECT users.name FROM users" return self._execute( self.cursor_to_dict, query diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 09417bd147..bad427288d 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -43,7 +43,7 @@ from synapse.util.logutils import log_function from collections import namedtuple import logging -import simplejson +import simplejson as json logger = logging.getLogger(__name__) @@ -178,7 +178,7 @@ class StreamStore(SQLBaseStore): # interested in was invited to a room. We'll be passing this # through _get_events_txn later, so ignore the fact that this # may be a redacted event. - event_content = simplejson.loads(row["unrecognized_keys"]) + event_content = json.loads(row["unrecognized_keys"]) if (service.is_interested_in_user( event_content.get("state_key"))): return True @@ -202,7 +202,7 @@ class StreamStore(SQLBaseStore): self._set_before_and_after(ret, rows) if rows: - key = "s%d" % max([r["stream_ordering"] for r in rows]) + key = "s%d" % max(r["stream_ordering"] for r in rows) else: # Assume we didn't get anything because there was nothing to @@ -271,7 +271,7 @@ class StreamStore(SQLBaseStore): self._set_before_and_after(ret, rows) if rows: - key = "s%d" % max([r["stream_ordering"] for r in rows]) + key = "s%d" % max(r["stream_ordering"] for r in rows) else: # Assume we didn't get anything because there was nothing to -- cgit 1.5.1 From 3d73383d185b41b9986366da8123255e3a8ce1e0 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 2 Mar 2015 10:16:24 +0000 Subject: Modify _simple_select_list to allow an empty WHERE clause. Use it for get_all_rooms and get_all_users. 
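Passing keyvalues=None (or an empty dict) now selects every row in the
table. A rough standalone sketch of the resulting query-building logic
(argument names follow the diff; this is illustrative, not the actual
helper):

    def build_select(table, keyvalues, retcols):
        if keyvalues:
            sql = "SELECT %s FROM %s WHERE %s ORDER BY rowid asc" % (
                ", ".join(retcols),
                table,
                " AND ".join("%s = ?" % (k,) for k in keyvalues),
            )
            return sql, list(keyvalues.values())
        sql = "SELECT %s FROM %s ORDER BY rowid asc" % (
            ", ".join(retcols), table,
        )
        return sql, []

    # build_select("users", None, ["name"])
    #   -> ("SELECT name FROM users ORDER BY rowid asc", [])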
--- synapse/storage/_base.py | 22 +++++++++++++++------- synapse/storage/appservice.py | 4 ++-- synapse/storage/registration.py | 7 ++----- synapse/storage/room.py | 5 ++--- 4 files changed, 21 insertions(+), 17 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index c98dd36aed..3725c9795d 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -450,7 +450,8 @@ class SQLBaseStore(object): Args: table : string giving the table name - keyvalues : dict of column names and values to select the rows with + keyvalues : dict of column names and values to select the rows with, + or None to not apply a WHERE clause. retcols : list of strings giving the names of the columns to return """ return self.runInteraction( @@ -469,13 +470,20 @@ class SQLBaseStore(object): keyvalues : dict of column names and values to select the rows with retcols : list of strings giving the names of the columns to return """ - sql = "SELECT %s FROM %s WHERE %s ORDER BY rowid asc" % ( - ", ".join(retcols), - table, - " AND ".join("%s = ?" % (k, ) for k in keyvalues) - ) + if keyvalues: + sql = "SELECT %s FROM %s WHERE %s ORDER BY rowid asc" % ( + ", ".join(retcols), + table, + " AND ".join("%s = ?" % (k, ) for k in keyvalues) + ) + txn.execute(sql, keyvalues.values()) + else: + sql = "SELECT %s FROM %s ORDER BY rowid asc" % ( + ", ".join(retcols), + table + ) + txn.execute(sql) - txn.execute(sql, keyvalues.values()) return self.cursor_to_dict(txn) def _simple_update_one(self, table, keyvalues, updatevalues, diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index c6ca2ab04e..0e3eab9422 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -220,8 +220,8 @@ class ApplicationServiceStore(SQLBaseStore): # get all rooms matching the room ID regex. room_entries = yield self.get_all_rooms() # RoomEntry list matching_room_list = set([ - r.room_id for r in room_entries if - service.is_interested_in_room(r.room_id) + r["room_id"] for r in room_entries if + service.is_interested_in_room(r["room_id"]) ]) # resolve room IDs for matching room alias regex. diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 9c92575c7f..54cd15bc0e 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -93,11 +93,8 @@ class RegistrationStore(SQLBaseStore): ) def get_all_users(self): - query = "SELECT users.name FROM users" - return self._execute( - self.cursor_to_dict, - query - ) + return self._simple_select_list( + table="users", keyvalues=None, retcols=["name"]) def get_user_by_token(self, token): """Get a user from the given access token. diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 3a64693404..6bd0b22ae5 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -77,9 +77,8 @@ class RoomStore(SQLBaseStore): Returns: A list of namedtuples containing the room information. 
""" - query = RoomsTable.select_statement() - return self._execute( - RoomsTable.decode_results, query, + return self._simple_select_list( + table="rooms", keyvalues=None, retcols=["room_id"] ) @defer.inlineCallbacks -- cgit 1.5.1 From b216b3689248094989168c340b60f500c93772a7 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 2 Mar 2015 10:41:35 +0000 Subject: JOIN state_events rather than parsing unrecognized_keys to pull out member state_keys --- synapse/storage/appservice.py | 2 +- synapse/storage/stream.py | 14 ++++---------- 2 files changed, 5 insertions(+), 11 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 0e3eab9422..3a267d0442 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -218,7 +218,7 @@ class ApplicationServiceStore(SQLBaseStore): # less obvious. # get all rooms matching the room ID regex. - room_entries = yield self.get_all_rooms() # RoomEntry list + room_entries = yield self.get_all_rooms() matching_room_list = set([ r["room_id"] for r in room_entries if service.is_interested_in_room(r["room_id"]) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index bad427288d..865cb13e9e 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -43,7 +43,6 @@ from synapse.util.logutils import log_function from collections import namedtuple import logging -import simplejson as json logger = logging.getLogger(__name__) @@ -161,8 +160,9 @@ class StreamStore(SQLBaseStore): # select all the events between from/to with a sensible limit sql = ( - "SELECT e.event_id, e.room_id, e.type, e.unrecognized_keys, " - "e.stream_ordering FROM events AS e " + "SELECT e.event_id, e.room_id, e.type, s.state_key, " + "e.stream_ordering FROM events AS e LEFT JOIN state_events as s ON " + "e.event_id = s.event_id " "WHERE e.stream_ordering > ? AND e.stream_ordering <= ? " "ORDER BY stream_ordering ASC LIMIT %(limit)d " ) % { @@ -174,13 +174,7 @@ class StreamStore(SQLBaseStore): return True if row["type"] == EventTypes.Member: - # load up the content to inspect if some user the AS is - # interested in was invited to a room. We'll be passing this - # through _get_events_txn later, so ignore the fact that this - # may be a redacted event. - event_content = json.loads(row["unrecognized_keys"]) - if (service.is_interested_in_user( - event_content.get("state_key"))): + if service.is_interested_in_user(row.get("state_key")): return True return False -- cgit 1.5.1 From 377ae369c1275fabdac46fa00c0b2ba238467435 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 2 Mar 2015 11:20:51 +0000 Subject: Wrap all of get_app_service_rooms in a txn. 
--- synapse/storage/appservice.py | 38 +++++++++++++++++++++++----------- synapse/storage/directory.py | 21 ------------------- synapse/storage/registration.py | 4 ---- synapse/storage/room.py | 10 --------- synapse/storage/roommember.py | 36 +++++++++++++++++--------------- synapse/storage/stream.py | 46 ++++++++++++++++++++--------------------- 6 files changed, 67 insertions(+), 88 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 3a267d0442..97481d113b 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -15,6 +15,7 @@ import logging from twisted.internet import defer +from synapse.api.constants import Membership from synapse.api.errors import StoreError from synapse.appservice import ApplicationService from synapse.storage.roommember import RoomsForUser @@ -197,7 +198,6 @@ class ApplicationServiceStore(SQLBaseStore): # TODO: The from_cache=False impl # TODO: This should be JOINed with the application_services_regex table. - @defer.inlineCallbacks def get_app_service_rooms(self, service): """Get a list of RoomsForUser for this application service. @@ -212,35 +212,49 @@ class ApplicationServiceStore(SQLBaseStore): Returns: A list of RoomsForUser. """ - # FIXME: This is assuming that this store has methods from - # RoomStore, DirectoryStore, RegistrationStore, RoomMemberStore which is - # a bad assumption to make as it makes testing trickier and coupling - # less obvious. + return self.runInteraction( + "get_app_service_rooms", + self._get_app_service_rooms_txn, + service, + ) + def _get_app_service_rooms_txn(self, txn, service): # get all rooms matching the room ID regex. - room_entries = yield self.get_all_rooms() + room_entries = self._simple_select_list_txn( + txn=txn, table="rooms", keyvalues=None, retcols=["room_id"] + ) matching_room_list = set([ r["room_id"] for r in room_entries if service.is_interested_in_room(r["room_id"]) ]) # resolve room IDs for matching room alias regex. - room_alias_mappings = yield self.get_all_associations() + room_alias_mappings = self._simple_select_list_txn( + txn=txn, table="room_aliases", keyvalues=None, + retcols=["room_id", "room_alias"] + ) matching_room_list |= set([ - r.room_id for r in room_alias_mappings if - service.is_interested_in_alias(r.room_alias) + r["room_id"] for r in room_alias_mappings if + service.is_interested_in_alias(r["room_alias"]) ]) # get all rooms for every user for this AS. This is scoped to users on # this HS only. 
- user_list = yield self.get_all_users() + user_list = self._simple_select_list_txn( + txn=txn, table="users", keyvalues=None, retcols=["name"] + ) user_list = [ u["name"] for u in user_list if service.is_interested_in_user(u["name"]) ] rooms_for_user_matching_user_id = set() # RoomsForUser list for user_id in user_list: - rooms_for_user = yield self.get_rooms_for_user(user_id) + # FIXME: This assumes this store is linked with RoomMemberStore :( + rooms_for_user = self._get_rooms_for_user_where_membership_is_txn( + txn=txn, + user_id=user_id, + membership_list=[Membership.JOIN] + ) rooms_for_user_matching_user_id |= set(rooms_for_user) # make RoomsForUser tuples for room ids and aliases which are not in the @@ -253,7 +267,7 @@ class ApplicationServiceStore(SQLBaseStore): ] rooms_for_user_matching_user_id |= set(missing_rooms_for_user) - defer.returnValue(rooms_for_user_matching_user_id) + return rooms_for_user_matching_user_id @defer.inlineCallbacks def _populate_cache(self): diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index e391239a3c..68b7d59693 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -134,27 +134,6 @@ class DirectoryStore(SQLBaseStore): return room_id - @defer.inlineCallbacks - def get_all_associations(self): - """Retrieve the entire list of room alias -> room ID pairings. - - Returns: - A list of RoomAliasMappings. - """ - results = yield self._execute_and_decode( - "SELECT room_id, room_alias FROM room_aliases" - ) - - # TODO(kegan): It feels wrong to be specifying no servers here, but - # equally this function isn't required to obtain all servers so - # retrieving them "just for the sake of it" also seems wrong, but we - # want to conform to passing Objects around and not dicts.. - defer.returnValue([ - RoomAliasMapping( - room_id=r["room_id"], room_alias=r["room_alias"], servers="" - ) for r in results - ]) - def get_aliases_for_room(self, room_id): return self._simple_select_onecol( "room_aliases", diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 54cd15bc0e..029b07cc66 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -92,10 +92,6 @@ class RegistrationStore(SQLBaseStore): query, user_id ) - def get_all_users(self): - return self._simple_select_list( - table="users", keyvalues=None, retcols=["name"]) - def get_user_by_token(self, token): """Get a user from the given access token. diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 6bd0b22ae5..750b17a45f 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -71,16 +71,6 @@ class RoomStore(SQLBaseStore): RoomsTable.decode_single_result, query, room_id, ) - def get_all_rooms(self): - """Retrieve all the rooms. - - Returns: - A list of namedtuples containing the room information. - """ - return self._simple_select_list( - table="rooms", keyvalues=None, retcols=["room_id"] - ) - @defer.inlineCallbacks def get_rooms(self, is_public): """Retrieve a list of all public rooms. 
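# RoomsForUser itself is a plain namedtuple, and rooms matched only by
# the ID/alias regexes get a synthetic "join" entry under the AS's
# sender. A standalone sketch of that last step (mirrors
# missing_rooms_for_user above; the helper name is invented):
from collections import namedtuple

RoomsForUser = namedtuple("RoomsForUser", ("room_id", "sender", "membership"))

def with_synthetic_joins(matching_room_ids, rooms_for_users, as_sender):
    known = set(r.room_id for r in rooms_for_users)
    extra = set(
        RoomsForUser(rid, as_sender, "join")
        for rid in matching_room_ids if rid not in known
    )
    return set(rooms_for_users) | extra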
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 58aa376c20..3d0172d09b 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -180,6 +180,14 @@ class RoomMemberStore(SQLBaseStore): if not membership_list: return defer.succeed(None) + return self.runInteraction( + "get_rooms_for_user_where_membership_is", + self._get_rooms_for_user_where_membership_is_txn, + user_id, membership_list + ) + + def _get_rooms_for_user_where_membership_is_txn(self, txn, user_id, + membership_list): where_clause = "user_id = ? AND (%s)" % ( " OR ".join(["membership = ?" for _ in membership_list]), ) @@ -187,24 +195,18 @@ class RoomMemberStore(SQLBaseStore): args = [user_id] args.extend(membership_list) - def f(txn): - sql = ( - "SELECT m.room_id, m.sender, m.membership" - " FROM room_memberships as m" - " INNER JOIN current_state_events as c" - " ON m.event_id = c.event_id" - " WHERE %s" - ) % (where_clause,) - - txn.execute(sql, args) - return [ - RoomsForUser(**r) for r in self.cursor_to_dict(txn) - ] + sql = ( + "SELECT m.room_id, m.sender, m.membership" + " FROM room_memberships as m" + " INNER JOIN current_state_events as c" + " ON m.event_id = c.event_id" + " WHERE %s" + ) % (where_clause,) - return self.runInteraction( - "get_rooms_for_user_where_membership_is", - f - ) + txn.execute(sql, args) + return [ + RoomsForUser(**r) for r in self.cursor_to_dict(txn) + ] def get_joined_hosts_for_room(self, room_id): return self._simple_select_onecol( diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 865cb13e9e..09bc522210 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -146,18 +146,6 @@ class StreamStore(SQLBaseStore): defer.returnValue(([], to_key)) return - # Logic: - # - We want ALL events which match the AS room_id regex - # - We want ALL events which match the rooms represented by the AS - # room_alias regex - # - We want ALL events for rooms that AS users have joined. - # This is currently supported via get_app_service_rooms (which is used - # for the Notifier listener rooms). We can't reasonably make a SQL - # query for these room IDs, so we'll pull all the events between from/to - # and filter in python. - rooms_for_as = yield self.get_app_service_rooms(service) - room_ids_for_as = [r.room_id for r in rooms_for_as] - # select all the events between from/to with a sensible limit sql = ( "SELECT e.event_id, e.room_id, e.type, s.state_key, " @@ -169,20 +157,32 @@ class StreamStore(SQLBaseStore): "limit": limit } - def app_service_interested(row): - if row["room_id"] in room_ids_for_as: - return True - - if row["type"] == EventTypes.Member: - if service.is_interested_in_user(row.get("state_key")): - return True - return False - def f(txn): + # pull out all the events between the tokens txn.execute(sql, (from_id.stream, to_id.stream,)) - rows = self.cursor_to_dict(txn) + # Logic: + # - We want ALL events which match the AS room_id regex + # - We want ALL events which match the rooms represented by the AS + # room_alias regex + # - We want ALL events for rooms that AS users have joined. + # This is currently supported via get_app_service_rooms (which is + # used for the Notifier listener rooms). We can't reasonably make a + # SQL query for these room IDs, so we'll pull all the events between + # from/to and filter in python. 
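# Worked example of the filter below: with room_ids_for_as = {"!a:hs"},
# an event in "!a:hs" passes via the cheap room-ID lookup, while an
# m.room.member event in some other room whose state_key matches the
# AS's user regex (say "@irc_alice:hs") still passes via the membership
# check -- so invites to AS users in otherwise-unknown rooms are kept.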
+ rooms_for_as = self._get_app_service_rooms_txn(txn, service) + room_ids_for_as = [r.room_id for r in rooms_for_as] + + def app_service_interested(row): + if row["room_id"] in room_ids_for_as: + return True + + if row["type"] == EventTypes.Member: + if service.is_interested_in_user(row.get("state_key")): + return True + return False + ret = self._get_events_txn( txn, # apply the filter on the room id list @@ -197,7 +197,6 @@ class StreamStore(SQLBaseStore): if rows: key = "s%d" % max(r["stream_ordering"] for r in rows) - else: # Assume we didn't get anything because there was nothing to # get. @@ -266,7 +265,6 @@ class StreamStore(SQLBaseStore): if rows: key = "s%d" % max(r["stream_ordering"] for r in rows) - else: # Assume we didn't get anything because there was nothing to # get. -- cgit 1.5.1 From cb97ea3ec236c23c745e59c3a857503dd8dc3410 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 2 Mar 2015 11:23:46 +0000 Subject: PEP8 --- synapse/storage/roommember.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 3d0172d09b..65ffb4627f 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -187,7 +187,7 @@ class RoomMemberStore(SQLBaseStore): ) def _get_rooms_for_user_where_membership_is_txn(self, txn, user_id, - membership_list): + membership_list): where_clause = "user_id = ? AND (%s)" % ( " OR ".join(["membership = ?" for _ in membership_list]), ) -- cgit 1.5.1 From c3c01641d2d49988c59826912e4e48740ab4f32a Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Mon, 2 Mar 2015 13:38:57 +0000 Subject: Run deltas and bump user_version in upgrade script --- UPGRADE.rst | 5 +++++ scripts/upgrade_appservice_db.py | 28 +++++++++++++++++++++++----- synapse/storage/__init__.py | 2 +- 3 files changed, 29 insertions(+), 6 deletions(-) (limited to 'synapse/storage') diff --git a/UPGRADE.rst b/UPGRADE.rst index 4045baf4e0..8cda8d02a0 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -12,6 +12,11 @@ Servers which use captchas will need to add their public key to:: This is required in order to support registration fallback (typically used on mobile devices). +Servers which have registered application services need to upgrade their +database as the format of stored application services has changed in Synapse. +Run ``python upgrade_appservice_db.py `` to convert to the +new format. + Upgrading to v0.7.0 =================== diff --git a/scripts/upgrade_appservice_db.py b/scripts/upgrade_appservice_db.py index acdee56d9f..ae1b91c64f 100644 --- a/scripts/upgrade_appservice_db.py +++ b/scripts/upgrade_appservice_db.py @@ -1,17 +1,28 @@ +from synapse.storage import read_schema import argparse import json import sqlite3 -def main(dbname): - con = sqlite3.connect(dbname) - cur = con.cursor() +def do_other_deltas(cursor): + cursor.execute("PRAGMA user_version") + row = cursor.fetchone() + + if row and row[0]: + user_version = row[0] + # Run every version since after the current version. + for v in range(user_version + 1, 10): + print "Running delta: %d" % (v,) + sql_script = read_schema("delta/v%d" % (v,)) + cursor.executescript(sql_script) + + +def update_app_service_table(cur): cur.execute("SELECT id, regex FROM application_services_regex") for row in cur.fetchall(): try: print "checking %s..." % row[0] json.loads(row[1]) - print "Already in new format" except ValueError: # row isn't in json, make it so. 
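# (json.loads doubles as a format probe here: rows that were already
# migrated parse cleanly and are skipped, while legacy plain-string
# regexes raise ValueError and fall through to be wrapped in the new
# {"regex": ..., "exclusive": ...} object form.)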
string_regex = row[1] @@ -23,13 +34,20 @@ def main(dbname): "UPDATE application_services_regex SET regex=? WHERE id=?", (new_regex, row[0]) ) + + +def main(dbname): + con = sqlite3.connect(dbname) + cur = con.cursor() + do_other_deltas(cur) + update_app_service_table(cur) + cur.execute("PRAGMA user_version = 14") cur.close() con.commit() if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument("database") args = parser.parse_args() diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index d16e7b8fac..3753cd28d0 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -74,7 +74,7 @@ SCHEMAS = [ # Remember to update this number every time an incompatible change is made to # database schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 13 +SCHEMA_VERSION = 14 dir_path = os.path.abspath(os.path.dirname(__file__)) -- cgit 1.5.1 From 20436cdf7522fdb76e8d883cf251d9332c0ea6d3 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 2 Mar 2015 15:58:12 +0000 Subject: Blank lines --- synapse/rest/client/v1/push_rule.py | 1 + synapse/storage/push_rule.py | 1 + 2 files changed, 2 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 5bfdb29907..3db38a949a 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -218,6 +218,7 @@ class PushRuleRestServlet(ClientV1RestServlet): else: raise UnrecognizedRequestError() + def _rule_spec_from_path(path): if len(path) < 2: raise UnrecognizedRequestError() diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 6c38565773..c648c9960d 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -229,6 +229,7 @@ class PushRuleStore(SQLBaseStore): {'enabled': False} ) + class RuleNotFoundException(Exception): pass -- cgit 1.5.1 From b41dc687739e92d5debcae5bb9feb21b97cbc178 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Mar 2015 16:36:19 +0000 Subject: We purposefully don't have a version 14 delta script. --- synapse/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 3753cd28d0..f0b7b8fef3 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -633,7 +633,7 @@ def prepare_database(db_conn): # Run every version since after the current version. 
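# The same versioned-migration loop in miniature (a standalone sketch
# driving sqlite3 directly; DELTAS maps schema version -> SQL script and
# is invented for the example):
import sqlite3

DELTAS = {2: "CREATE TABLE IF NOT EXISTS example (id INTEGER);"}

def upgrade(conn, target_version):
    cur = conn.cursor()
    cur.execute("PRAGMA user_version")
    current = cur.fetchone()[0]
    for v in range(current + 1, target_version + 1):
        if v in DELTAS:
            cur.executescript(DELTAS[v])
    # PRAGMA arguments cannot be bound parameters, hence the formatting
    cur.execute("PRAGMA user_version = %d" % (target_version,))
    conn.commit()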
for v in range(user_version + 1, SCHEMA_VERSION + 1): - if v == 10: + if v in (10, 14,): raise UpgradeDatabaseException( "No delta for version 10" ) -- cgit 1.5.1 From 9f03553f48d774ba88ca91a14050c94c4a1b77b6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Mar 2015 16:38:40 +0000 Subject: Add missing comma --- synapse/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index f0b7b8fef3..d6ec446bd2 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -637,7 +637,7 @@ def prepare_database(db_conn): raise UpgradeDatabaseException( "No delta for version 10" ) - sql_script = read_schema("delta/v%d" % (v)) + sql_script = read_schema("delta/v%d" % (v,)) c.executescript(sql_script) db_conn.commit() -- cgit 1.5.1 From 6fab7bd2c1102c3f3254074cc996d950805531b4 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 2 Mar 2015 18:17:19 +0000 Subject: s/user_name/user/ as per mjark's comment --- synapse/push/__init__.py | 4 ++-- synapse/rest/client/v1/push_rule.py | 6 +++--- synapse/storage/push_rule.py | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index d4da05f093..ba1aac30fb 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -76,13 +76,13 @@ class Pusher(object): if ev['state_key'] != self.user_name: defer.returnValue(['dont_notify']) - rawrules = yield self.store.get_push_rules_for_user_name(self.user_name) + rawrules = yield self.store.get_push_rules_for_user(self.user_name) for r in rawrules: r['conditions'] = json.loads(r['conditions']) r['actions'] = json.loads(r['actions']) - enabled_map = yield self.store.get_push_rules_enabled_for_user_name(self.user_name) + enabled_map = yield self.store.get_push_rules_enabled_for_user(self.user_name) user = UserID.from_string(self.user_name) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 822c978e85..fef0eb6572 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -114,7 +114,7 @@ class PushRuleRestServlet(ClientV1RestServlet): # we build up the full structure and then decide which bits of it # to send which means doing unnecessary work sometimes but is # is probably not going to make a whole lot of difference - rawrules = yield self.hs.get_datastore().get_push_rules_for_user_name( + rawrules = yield self.hs.get_datastore().get_push_rules_for_user( user.to_string() ) @@ -129,7 +129,7 @@ class PushRuleRestServlet(ClientV1RestServlet): rules['global'] = _add_empty_priority_class_arrays(rules['global']) enabled_map = yield self.hs.get_datastore().\ - get_push_rules_enabled_for_user_name(user.to_string()) + get_push_rules_enabled_for_user(user.to_string()) for r in ruleslist: rulearray = None @@ -212,7 +212,7 @@ class PushRuleRestServlet(ClientV1RestServlet): def get_rule_attr(self, user_name, namespaced_rule_id, attr): if attr == 'enabled': - return self.hs.get_datastore().get_push_rule_enabled_by_user_name_rule_id( + return self.hs.get_datastore().get_push_rule_enabled_by_user_rule_id( user_name, namespaced_rule_id ) else: diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index c648c9960d..ea865b6abf 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -27,7 +27,7 @@ logger = logging.getLogger(__name__) class PushRuleStore(SQLBaseStore): 
@defer.inlineCallbacks - def get_push_rules_for_user_name(self, user_name): + def get_push_rules_for_user(self, user_name): sql = ( "SELECT "+",".join(PushRuleTable.fields)+" " "FROM "+PushRuleTable.table_name+" " @@ -46,7 +46,7 @@ class PushRuleStore(SQLBaseStore): defer.returnValue(dicts) @defer.inlineCallbacks - def get_push_rules_enabled_for_user_name(self, user_name): + def get_push_rules_enabled_for_user(self, user_name): results = yield self._simple_select_list( PushRuleEnableTable.table_name, {'user_name': user_name}, @@ -57,7 +57,7 @@ class PushRuleStore(SQLBaseStore): ) @defer.inlineCallbacks - def get_push_rule_enabled_by_user_name_rule_id(self, user_name, rule_id): + def get_push_rule_enabled_by_user_rule_id(self, user_name, rule_id): results = yield self._simple_select_list( PushRuleEnableTable.table_name, {'user_name': user_name, 'rule_id': rule_id}, -- cgit 1.5.1 From 8d33adfbbbd01a9a85e862ad2bd21ae45230e710 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Mar 2015 18:23:55 +0000 Subject: SYN-67: Begin changing the way we handle schema versioning --- synapse/storage/__init__.py | 144 ++++++++++++++++++++++++++++++++------------ 1 file changed, 106 insertions(+), 38 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index d6ec446bd2..a08c74fac1 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -45,8 +45,10 @@ from syutil.jsonutil import encode_canonical_json from synapse.crypto.event_signing import compute_event_reference_hash +import imp import logging import os +import sqlite3 logger = logging.getLogger(__name__) @@ -610,49 +612,115 @@ class UpgradeDatabaseException(PrepareDatabaseException): def prepare_database(db_conn): - """ Set up all the dbs. Since all the *.sql have IF NOT EXISTS, so we - don't have to worry about overwriting existing content. + """Prepares a database for usage. Will either create all necessary tables + or upgrade from an older schema version. """ - c = db_conn.cursor() - c.execute("PRAGMA user_version") - row = c.fetchone() + cur = db_conn.cursor() + version_info = get_schema_state(cur) - if row and row[0]: - user_version = row[0] + if version_info: + user_version, delta_files = version_info + _upgrade_existing_database(cur, user_version, delta_files) + else: + _setup_new_database(cur) + + cur.execute("PRAGMA user_version = %d" % (SCHEMA_VERSION,)) + db_conn.commit() + + cur.close() + + +def _setup_new_database(cur): + sql_script = "BEGIN TRANSACTION;\n" + for sql_loc in SCHEMAS: + logger.debug("Applying schema %r", sql_loc) + sql_script += read_schema(sql_loc) + sql_script += "\n" + sql_script += "COMMIT TRANSACTION;" + cur.executescript(sql_script) + + +def _upgrade_existing_database(cur, user_version, delta_files): + """Upgrades an existing database. + + Delta files can either be SQL stored in *.sql files, or python modules + in *.py. + + There can be multiple delta files per version. Synapse will keep track of + which delta files have been applied, and will apply any that haven't been + even if there has been no version bump. This is useful for development + where orthogonal schema changes may happen on separate branches. 
+ """ + + if user_version > SCHEMA_VERSION: + raise ValueError( + "Cannot use this database as it is too " + + "new for the server to understand" + ) - if user_version > SCHEMA_VERSION: - raise ValueError( - "Cannot use this database as it is too " + - "new for the server to understand" + for v in range(user_version, SCHEMA_VERSION + 1): + delta_dir = os.path.join(dir_path, "schema", "delta", v) + directory_entries = os.listdir(delta_dir) + + for file_name in directory_entries: + relative_path = os.path.join(v, file_name) + if relative_path in delta_files: + continue + + absolute_path = os.path.join( + dir_path, "schema", "delta", relative_path, ) - elif user_version < SCHEMA_VERSION: - logger.info( - "Upgrading database from version %d", - user_version + root_name, ext = os.path.splitext(file_name) + if ext == ".py": + module_name = "synapse.storage.schema.v%d_%s" % ( + v, root_name + ) + with open(absolute_path) as schema_file: + module = imp.load_source( + module_name, absolute_path, schema_file + ) + module.run_upgrade(cur) + elif ext == ".sql": + with open(absolute_path) as schema_file: + delta_schema = schema_file.read() + cur.executescript(delta_schema) + else: + # Not a valid delta file. + logger.warn( + "Found directory entry that did not end in .py or" + " .sql: %s", + relative_path, + ) + continue + + # Mark as done. + cur.execute( + "INSERT INTO schema_version (version, file)" + " VALUES (?,?)", + (v, relative_path) ) - # Run every version since after the current version. - for v in range(user_version + 1, SCHEMA_VERSION + 1): - if v in (10, 14,): - raise UpgradeDatabaseException( - "No delta for version 10" - ) - sql_script = read_schema("delta/v%d" % (v,)) - c.executescript(sql_script) - db_conn.commit() - else: - logger.info("Database is at version %r", user_version) +def get_schema_state(txn): + sql = ( + "SELECT MAX(version), file FROM schema_version" + " WHERE version = (SELECT MAX(version) FROM schema_version)" + ) - else: - sql_script = "BEGIN TRANSACTION;\n" - for sql_loc in SCHEMAS: - logger.debug("Applying schema %r", sql_loc) - sql_script += read_schema(sql_loc) - sql_script += "\n" - sql_script += "COMMIT TRANSACTION;" - c.executescript(sql_script) - db_conn.commit() - c.execute("PRAGMA user_version = %d" % SCHEMA_VERSION) - - c.close() + try: + txn.execute(sql) + res = txn.fetchall() + + if res: + current_verison = max(r[0] for r in res) + applied_delta = [r[1] for r in res] + + return current_verison, applied_delta + except sqlite3.OperationalError: + txn.execute("PRAGMA user_version") + row = txn.fetchone() + if row and row[0]: + # FIXME: We need to create schema_version table! 
+ return row[0], [] + + return None -- cgit 1.5.1 From 82b34e813de4dadb8ec5bce068f7113e32e60ead Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Mar 2015 12:04:19 +0000 Subject: SYN-67: Finish up implementing new database schema management --- scripts/upgrade_appservice_db.py | 54 ------ synapse/app/homeserver.py | 5 +- synapse/storage/__init__.py | 197 ++++++++++++++------- synapse/storage/schema/application_services.sql | 34 ---- synapse/storage/schema/current/11/event_edges.sql | 89 ++++++++++ .../storage/schema/current/11/event_signatures.sql | 65 +++++++ synapse/storage/schema/current/11/im.sql | 125 +++++++++++++ synapse/storage/schema/current/11/keys.sql | 31 ++++ .../storage/schema/current/11/media_repository.sql | 68 +++++++ synapse/storage/schema/current/11/presence.sql | 38 ++++ synapse/storage/schema/current/11/profiles.sql | 20 +++ synapse/storage/schema/current/11/redactions.sql | 22 +++ synapse/storage/schema/current/11/room_aliases.sql | 27 +++ synapse/storage/schema/current/11/state.sql | 47 +++++ synapse/storage/schema/current/11/transactions.sql | 68 +++++++ synapse/storage/schema/current/11/users.sql | 45 +++++ synapse/storage/schema/delta/11/v11.sql | 16 ++ synapse/storage/schema/delta/12/v12.sql | 67 +++++++ synapse/storage/schema/delta/13/v13.sql | 34 ++++ .../schema/delta/14/upgrade_appservice_db.py | 20 +++ synapse/storage/schema/delta/v11.sql | 16 -- synapse/storage/schema/delta/v12.sql | 67 ------- synapse/storage/schema/delta/v13.sql | 34 ---- synapse/storage/schema/delta/v2.sql | 168 ------------------ synapse/storage/schema/delta/v3.sql | 27 --- synapse/storage/schema/delta/v4.sql | 26 --- synapse/storage/schema/delta/v5.sql | 30 ---- synapse/storage/schema/delta/v6.sql | 31 ---- synapse/storage/schema/delta/v8.sql | 34 ---- synapse/storage/schema/delta/v9.sql | 79 --------- synapse/storage/schema/event_edges.sql | 89 ---------- synapse/storage/schema/event_signatures.sql | 65 ------- synapse/storage/schema/filtering.sql | 24 --- synapse/storage/schema/im.sql | 125 ------------- synapse/storage/schema/keys.sql | 31 ---- synapse/storage/schema/media_repository.sql | 68 ------- synapse/storage/schema/presence.sql | 38 ---- synapse/storage/schema/profiles.sql | 20 --- synapse/storage/schema/pusher.sql | 46 ----- synapse/storage/schema/redactions.sql | 22 --- synapse/storage/schema/rejections.sql | 21 --- synapse/storage/schema/room_aliases.sql | 27 --- synapse/storage/schema/schema_version.sql | 29 +++ synapse/storage/schema/state.sql | 47 ----- synapse/storage/schema/transactions.sql | 68 ------- synapse/storage/schema/users.sql | 45 ----- 46 files changed, 946 insertions(+), 1403 deletions(-) delete mode 100644 scripts/upgrade_appservice_db.py delete mode 100644 synapse/storage/schema/application_services.sql create mode 100644 synapse/storage/schema/current/11/event_edges.sql create mode 100644 synapse/storage/schema/current/11/event_signatures.sql create mode 100644 synapse/storage/schema/current/11/im.sql create mode 100644 synapse/storage/schema/current/11/keys.sql create mode 100644 synapse/storage/schema/current/11/media_repository.sql create mode 100644 synapse/storage/schema/current/11/presence.sql create mode 100644 synapse/storage/schema/current/11/profiles.sql create mode 100644 synapse/storage/schema/current/11/redactions.sql create mode 100644 synapse/storage/schema/current/11/room_aliases.sql create mode 100644 synapse/storage/schema/current/11/state.sql create mode 100644 synapse/storage/schema/current/11/transactions.sql create mode 
100644 synapse/storage/schema/current/11/users.sql create mode 100644 synapse/storage/schema/delta/11/v11.sql create mode 100644 synapse/storage/schema/delta/12/v12.sql create mode 100644 synapse/storage/schema/delta/13/v13.sql create mode 100644 synapse/storage/schema/delta/14/upgrade_appservice_db.py delete mode 100644 synapse/storage/schema/delta/v11.sql delete mode 100644 synapse/storage/schema/delta/v12.sql delete mode 100644 synapse/storage/schema/delta/v13.sql delete mode 100644 synapse/storage/schema/delta/v2.sql delete mode 100644 synapse/storage/schema/delta/v3.sql delete mode 100644 synapse/storage/schema/delta/v4.sql delete mode 100644 synapse/storage/schema/delta/v5.sql delete mode 100644 synapse/storage/schema/delta/v6.sql delete mode 100644 synapse/storage/schema/delta/v8.sql delete mode 100644 synapse/storage/schema/delta/v9.sql delete mode 100644 synapse/storage/schema/event_edges.sql delete mode 100644 synapse/storage/schema/event_signatures.sql delete mode 100644 synapse/storage/schema/filtering.sql delete mode 100644 synapse/storage/schema/im.sql delete mode 100644 synapse/storage/schema/keys.sql delete mode 100644 synapse/storage/schema/media_repository.sql delete mode 100644 synapse/storage/schema/presence.sql delete mode 100644 synapse/storage/schema/profiles.sql delete mode 100644 synapse/storage/schema/pusher.sql delete mode 100644 synapse/storage/schema/redactions.sql delete mode 100644 synapse/storage/schema/rejections.sql delete mode 100644 synapse/storage/schema/room_aliases.sql create mode 100644 synapse/storage/schema/schema_version.sql delete mode 100644 synapse/storage/schema/state.sql delete mode 100644 synapse/storage/schema/transactions.sql delete mode 100644 synapse/storage/schema/users.sql (limited to 'synapse/storage') diff --git a/scripts/upgrade_appservice_db.py b/scripts/upgrade_appservice_db.py deleted file mode 100644 index ae1b91c64f..0000000000 --- a/scripts/upgrade_appservice_db.py +++ /dev/null @@ -1,54 +0,0 @@ -from synapse.storage import read_schema -import argparse -import json -import sqlite3 - - -def do_other_deltas(cursor): - cursor.execute("PRAGMA user_version") - row = cursor.fetchone() - - if row and row[0]: - user_version = row[0] - # Run every version since after the current version. - for v in range(user_version + 1, 10): - print "Running delta: %d" % (v,) - sql_script = read_schema("delta/v%d" % (v,)) - cursor.executescript(sql_script) - - -def update_app_service_table(cur): - cur.execute("SELECT id, regex FROM application_services_regex") - for row in cur.fetchall(): - try: - print "checking %s..." % row[0] - json.loads(row[1]) - except ValueError: - # row isn't in json, make it so. - string_regex = row[1] - new_regex = json.dumps({ - "regex": string_regex, - "exclusive": True - }) - cur.execute( - "UPDATE application_services_regex SET regex=? 
WHERE id=?", - (new_regex, row[0]) - ) - - -def main(dbname): - con = sqlite3.connect(dbname) - cur = con.cursor() - do_other_deltas(cur) - update_app_service_table(cur) - cur.execute("PRAGMA user_version = 14") - cur.close() - con.commit() - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("database") - args = parser.parse_args() - - main(args.database) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 5695d5aff8..b3ba7dfddc 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -17,7 +17,9 @@ import sys sys.dont_write_bytecode = True -from synapse.storage import prepare_database, UpgradeDatabaseException +from synapse.storage import ( + prepare_database, prepare_sqlite3_database, UpgradeDatabaseException, +) from synapse.server import HomeServer @@ -335,6 +337,7 @@ def setup(): try: with sqlite3.connect(db_name) as db_conn: + prepare_sqlite3_database(db_conn) prepare_database(db_conn) except UpgradeDatabaseException: sys.stderr.write( diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index a08c74fac1..07ccc4e2ee 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -45,35 +45,16 @@ from syutil.jsonutil import encode_canonical_json from synapse.crypto.event_signing import compute_event_reference_hash +import fnmatch import imp import logging import os -import sqlite3 +import re logger = logging.getLogger(__name__) -SCHEMAS = [ - "transactions", - "users", - "profiles", - "presence", - "im", - "room_aliases", - "keys", - "redactions", - "state", - "event_edges", - "event_signatures", - "pusher", - "media_repository", - "application_services", - "filtering", - "rejections", -] - - # Remember to update this number every time an incompatible change is made to # database schema files, so the users will be informed on server restarts. SCHEMA_VERSION = 14 @@ -578,28 +559,15 @@ class DataStore(RoomMemberStore, RoomStore, ) -def schema_path(schema): - """ Get a filesystem path for the named database schema - - Args: - schema: Name of the database schema. - Returns: - A filesystem path pointing at a ".sql" file. - - """ - schemaPath = os.path.join(dir_path, "schema", schema + ".sql") - return schemaPath - - -def read_schema(schema): +def read_schema(path): """ Read the named database schema. Args: - schema: Name of the datbase schema. + path: Path of the database schema. Returns: A string containing the database schema. """ - with open(schema_path(schema)) as schema_file: + with open(path) as schema_file: return schema_file.read() @@ -616,11 +584,11 @@ def prepare_database(db_conn): or upgrade from an older schema version. 
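+
+    A minimal usage sketch (illustrative only: the database path is
+    hypothetical, and prepare_sqlite3_database is the migration helper
+    defined further down in this module):
+
+        import sqlite3
+
+        with sqlite3.connect("homeserver.db") as db_conn:
+            prepare_sqlite3_database(db_conn)  # migrate legacy PRAGMA user_version state
+            prepare_database(db_conn)          # then create or upgrade the schema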
""" cur = db_conn.cursor() - version_info = get_schema_state(cur) + version_info = get_or_create_schema_state(cur) if version_info: - user_version, delta_files = version_info - _upgrade_existing_database(cur, user_version, delta_files) + user_version, delta_files, upgraded = version_info + _upgrade_existing_database(cur, user_version, delta_files, upgraded) else: _setup_new_database(cur) @@ -631,16 +599,52 @@ def prepare_database(db_conn): def _setup_new_database(cur): + current_dir = os.path.join(dir_path, "schema", "current") + directory_entries = os.listdir(current_dir) + + valid_dirs = [] + pattern = re.compile(r"^\d+(\.sql)?$") + for filename in directory_entries: + match = pattern.match(filename) + abs_path = os.path.join(current_dir, filename) + if match and os.path.isdir(abs_path): + ver = int(match.group(0)) + if ver < SCHEMA_VERSION: + valid_dirs.append((ver, abs_path)) + + if not valid_dirs: + raise RuntimeError("Could not find a suitable current.sql") + + max_current_ver, sql_dir = max(valid_dirs, key=lambda x: x[0]) + + logger.debug("Initialising schema v%d", max_current_ver) + + directory_entries = os.listdir(sql_dir) + sql_script = "BEGIN TRANSACTION;\n" - for sql_loc in SCHEMAS: + for filename in fnmatch.filter(directory_entries, "*.sql"): + sql_loc = os.path.join(sql_dir, filename) logger.debug("Applying schema %r", sql_loc) sql_script += read_schema(sql_loc) sql_script += "\n" sql_script += "COMMIT TRANSACTION;" cur.executescript(sql_script) + cur.execute( + "INSERT INTO schema_version (version, upgraded)" + " VALUES (?,?)", + (max_current_ver, False) + ) + + _upgrade_existing_database( + cur, + current_version=max_current_ver, + delta_files=[], + upgraded=False + ) + -def _upgrade_existing_database(cur, user_version, delta_files): +def _upgrade_existing_database(cur, current_version, delta_files, upgraded): """Upgrades an existing database. Delta files can either be SQL stored in *.sql files, or python modules @@ -650,20 +654,41 @@ def _upgrade_existing_database(cur, user_version, delta_files): which delta files have been applied, and will apply any that haven't been even if there has been no version bump. This is useful for development where orthogonal schema changes may happen on separate branches. + + Args: + cur (Cursor) + current_version (int): The current version of the schema + delta_files (list): A list of deltas that have already been applied + upgraded (bool): Whether the current version was generated by having + applied deltas or from full schema file. If `True` the function + will never apply delta files for the given `current_version`, since + the current_version wasn't generated by applying those delta files. 
""" - if user_version > SCHEMA_VERSION: + if current_version > SCHEMA_VERSION: raise ValueError( "Cannot use this database as it is too " + "new for the server to understand" ) - for v in range(user_version, SCHEMA_VERSION + 1): - delta_dir = os.path.join(dir_path, "schema", "delta", v) - directory_entries = os.listdir(delta_dir) + start_ver = current_version + if not upgraded: + start_ver += 1 + + for v in range(start_ver, SCHEMA_VERSION + 1): + logger.debug("Upgrading schema to v%d", v) + + delta_dir = os.path.join(dir_path, "schema", "delta", str(v)) + + try: + directory_entries = os.listdir(delta_dir) + except OSError: + logger.exception("Could not open delta dir for version %d", v) + raise + directory_entries.sort() for file_name in directory_entries: - relative_path = os.path.join(v, file_name) + relative_path = os.path.join(str(v), file_name) if relative_path in delta_files: continue @@ -672,17 +697,19 @@ def _upgrade_existing_database(cur, user_version, delta_files): ) root_name, ext = os.path.splitext(file_name) if ext == ".py": - module_name = "synapse.storage.schema.v%d_%s" % ( + module_name = "synapse.storage.v%d_%s" % ( v, root_name ) with open(absolute_path) as schema_file: module = imp.load_source( module_name, absolute_path, schema_file ) + logger.debug("Running script %s", relative_path) module.run_upgrade(cur) elif ext == ".sql": with open(absolute_path) as schema_file: delta_schema = schema_file.read() + logger.debug("Applying schema %s", relative_path) cur.executescript(delta_schema) else: # Not a valid delta file. @@ -695,32 +722,70 @@ def _upgrade_existing_database(cur, user_version, delta_files): # Mark as done. cur.execute( - "INSERT INTO schema_version (version, file)" + "INSERT INTO schema_deltas (version, file)" " VALUES (?,?)", (v, relative_path) ) + cur.execute( + "INSERT INTO schema_version (version, upgraded)" + " VALUES (?,?)", + (v, True) + ) + -def get_schema_state(txn): - sql = ( - "SELECT MAX(version), file FROM schema_version" - " WHERE version = (SELECT MAX(version) FROM schema_version)" +def get_or_create_schema_state(txn): + schema_path = os.path.join( + dir_path, "schema", "schema_version.sql", ) + create_schema = read_schema(schema_path) + txn.executescript(create_schema) - try: - txn.execute(sql) + txn.execute("SELECT version, upgraded FROM schema_version") + row = txn.fetchone() + current_version = int(row[0]) if row else None + upgraded = bool(row[1]) if row else None + + if current_version: + txn.execute( + "SELECT file FROM schema_deltas WHERE version >= ?", + (current_version,) + ) res = txn.fetchall() + return current_version, txn.fetchall(), upgraded - if res: - current_verison = max(r[0] for r in res) - applied_delta = [r[1] for r in res] + return None - return current_verison, applied_delta - except sqlite3.OperationalError: - txn.execute("PRAGMA user_version") - row = txn.fetchone() - if row and row[0]: - # FIXME: We need to create schema_version table! - return row[0], [] - return None +def prepare_sqlite3_database(db_conn): + """This function should be called before `prepare_database` on sqlite3 + databases. + + Since we changed the way we store the current schema version and handle + updates to schemas, we need a way to upgrade from the old method to the + new. This only affects sqlite databases since they were the only ones + supported at the time. 
+ """ + with db_conn: + schema_path = os.path.join( + dir_path, "schema", "schema_version.sql", + ) + create_schema = read_schema(schema_path) + db_conn.executescript(create_schema) + + c = db_conn.execute("SELECT * FROM schema_version") + rows = c.fetchall() + c.close() + + if not rows: + c = db_conn.execute("PRAGMA user_version") + row = c.fetchone() + c.close() + + if row and row[0]: + ver = row[0] + db_conn.execute( + "INSERT INTO schema_version (version, upgraded)" + " VALUES (?,?)", + (row[0], False) + ) diff --git a/synapse/storage/schema/application_services.sql b/synapse/storage/schema/application_services.sql deleted file mode 100644 index e491ad5aec..0000000000 --- a/synapse/storage/schema/application_services.sql +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS application_services( - id INTEGER PRIMARY KEY AUTOINCREMENT, - url TEXT, - token TEXT, - hs_token TEXT, - sender TEXT, - UNIQUE(token) ON CONFLICT ROLLBACK -); - -CREATE TABLE IF NOT EXISTS application_services_regex( - id INTEGER PRIMARY KEY AUTOINCREMENT, - as_id INTEGER NOT NULL, - namespace INTEGER, /* enum[room_id|room_alias|user_id] */ - regex TEXT, - FOREIGN KEY(as_id) REFERENCES application_services(id) -); - - - diff --git a/synapse/storage/schema/current/11/event_edges.sql b/synapse/storage/schema/current/11/event_edges.sql new file mode 100644 index 0000000000..1e766d6db2 --- /dev/null +++ b/synapse/storage/schema/current/11/event_edges.sql @@ -0,0 +1,89 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE IF NOT EXISTS event_forward_extremities( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE +); + +CREATE INDEX IF NOT EXISTS ev_extrem_room ON event_forward_extremities(room_id); +CREATE INDEX IF NOT EXISTS ev_extrem_id ON event_forward_extremities(event_id); + + +CREATE TABLE IF NOT EXISTS event_backward_extremities( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE +); + +CREATE INDEX IF NOT EXISTS ev_b_extrem_room ON event_backward_extremities(room_id); +CREATE INDEX IF NOT EXISTS ev_b_extrem_id ON event_backward_extremities(event_id); + + +CREATE TABLE IF NOT EXISTS event_edges( + event_id TEXT NOT NULL, + prev_event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + is_state INTEGER NOT NULL, + CONSTRAINT uniqueness UNIQUE (event_id, prev_event_id, room_id, is_state) +); + +CREATE INDEX IF NOT EXISTS ev_edges_id ON event_edges(event_id); +CREATE INDEX IF NOT EXISTS ev_edges_prev_id ON event_edges(prev_event_id); + + +CREATE TABLE IF NOT EXISTS room_depth( + room_id TEXT NOT NULL, + min_depth INTEGER NOT NULL, + CONSTRAINT uniqueness UNIQUE (room_id) +); + +CREATE INDEX IF NOT EXISTS room_depth_room ON room_depth(room_id); + + +create TABLE IF NOT EXISTS event_destinations( + event_id TEXT NOT NULL, + destination TEXT NOT NULL, + delivered_ts INTEGER DEFAULT 0, -- or 0 if not delivered + CONSTRAINT uniqueness UNIQUE (event_id, destination) ON CONFLICT REPLACE +); + +CREATE INDEX IF NOT EXISTS event_destinations_id ON event_destinations(event_id); + + +CREATE TABLE IF NOT EXISTS state_forward_extremities( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + type TEXT NOT NULL, + state_key TEXT NOT NULL, + CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE +); + +CREATE INDEX IF NOT EXISTS st_extrem_keys ON state_forward_extremities( + room_id, type, state_key +); +CREATE INDEX IF NOT EXISTS st_extrem_id ON state_forward_extremities(event_id); + + +CREATE TABLE IF NOT EXISTS event_auth( + event_id TEXT NOT NULL, + auth_id TEXT NOT NULL, + room_id TEXT NOT NULL, + CONSTRAINT uniqueness UNIQUE (event_id, auth_id, room_id) +); + +CREATE INDEX IF NOT EXISTS evauth_edges_id ON event_auth(event_id); +CREATE INDEX IF NOT EXISTS evauth_edges_auth_id ON event_auth(auth_id); \ No newline at end of file diff --git a/synapse/storage/schema/current/11/event_signatures.sql b/synapse/storage/schema/current/11/event_signatures.sql new file mode 100644 index 0000000000..c28c39c48a --- /dev/null +++ b/synapse/storage/schema/current/11/event_signatures.sql @@ -0,0 +1,65 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE IF NOT EXISTS event_content_hashes ( + event_id TEXT, + algorithm TEXT, + hash BLOB, + CONSTRAINT uniqueness UNIQUE (event_id, algorithm) +); + +CREATE INDEX IF NOT EXISTS event_content_hashes_id ON event_content_hashes( + event_id +); + + +CREATE TABLE IF NOT EXISTS event_reference_hashes ( + event_id TEXT, + algorithm TEXT, + hash BLOB, + CONSTRAINT uniqueness UNIQUE (event_id, algorithm) +); + +CREATE INDEX IF NOT EXISTS event_reference_hashes_id ON event_reference_hashes ( + event_id +); + + +CREATE TABLE IF NOT EXISTS event_signatures ( + event_id TEXT, + signature_name TEXT, + key_id TEXT, + signature BLOB, + CONSTRAINT uniqueness UNIQUE (event_id, signature_name, key_id) +); + +CREATE INDEX IF NOT EXISTS event_signatures_id ON event_signatures ( + event_id +); + + +CREATE TABLE IF NOT EXISTS event_edge_hashes( + event_id TEXT, + prev_event_id TEXT, + algorithm TEXT, + hash BLOB, + CONSTRAINT uniqueness UNIQUE ( + event_id, prev_event_id, algorithm + ) +); + +CREATE INDEX IF NOT EXISTS event_edge_hashes_id ON event_edge_hashes( + event_id +); diff --git a/synapse/storage/schema/current/11/im.sql b/synapse/storage/schema/current/11/im.sql new file mode 100644 index 0000000000..dd00c1cd2f --- /dev/null +++ b/synapse/storage/schema/current/11/im.sql @@ -0,0 +1,125 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE IF NOT EXISTS events( + stream_ordering INTEGER PRIMARY KEY AUTOINCREMENT, + topological_ordering INTEGER NOT NULL, + event_id TEXT NOT NULL, + type TEXT NOT NULL, + room_id TEXT NOT NULL, + content TEXT NOT NULL, + unrecognized_keys TEXT, + processed BOOL NOT NULL, + outlier BOOL NOT NULL, + depth INTEGER DEFAULT 0 NOT NULL, + CONSTRAINT ev_uniq UNIQUE (event_id) +); + +CREATE INDEX IF NOT EXISTS events_event_id ON events (event_id); +CREATE INDEX IF NOT EXISTS events_stream_ordering ON events (stream_ordering); +CREATE INDEX IF NOT EXISTS events_topological_ordering ON events (topological_ordering); +CREATE INDEX IF NOT EXISTS events_room_id ON events (room_id); + + +CREATE TABLE IF NOT EXISTS event_json( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + internal_metadata NOT NULL, + json BLOB NOT NULL, + CONSTRAINT ev_j_uniq UNIQUE (event_id) +); + +CREATE INDEX IF NOT EXISTS event_json_id ON event_json(event_id); +CREATE INDEX IF NOT EXISTS event_json_room_id ON event_json(room_id); + + +CREATE TABLE IF NOT EXISTS state_events( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + type TEXT NOT NULL, + state_key TEXT NOT NULL, + prev_state TEXT +); + +CREATE UNIQUE INDEX IF NOT EXISTS state_events_event_id ON state_events (event_id); +CREATE INDEX IF NOT EXISTS state_events_room_id ON state_events (room_id); +CREATE INDEX IF NOT EXISTS state_events_type ON state_events (type); +CREATE INDEX IF NOT EXISTS state_events_state_key ON state_events (state_key); + + +CREATE TABLE IF NOT EXISTS current_state_events( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + type TEXT NOT NULL, + state_key TEXT NOT NULL, + CONSTRAINT curr_uniq UNIQUE (room_id, type, state_key) ON CONFLICT REPLACE +); + +CREATE INDEX IF NOT EXISTS curr_events_event_id ON current_state_events (event_id); +CREATE INDEX IF NOT EXISTS current_state_events_room_id ON current_state_events (room_id); +CREATE INDEX IF NOT EXISTS current_state_events_type ON current_state_events (type); +CREATE INDEX IF NOT EXISTS current_state_events_state_key ON current_state_events (state_key); + +CREATE TABLE IF NOT EXISTS room_memberships( + event_id TEXT NOT NULL, + user_id TEXT NOT NULL, + sender TEXT NOT NULL, + room_id TEXT NOT NULL, + membership TEXT NOT NULL +); + +CREATE INDEX IF NOT EXISTS room_memberships_event_id ON room_memberships (event_id); +CREATE INDEX IF NOT EXISTS room_memberships_room_id ON room_memberships (room_id); +CREATE INDEX IF NOT EXISTS room_memberships_user_id ON room_memberships (user_id); + +CREATE TABLE IF NOT EXISTS feedback( + event_id TEXT NOT NULL, + feedback_type TEXT, + target_event_id TEXT, + sender TEXT, + room_id TEXT +); + +CREATE TABLE IF NOT EXISTS topics( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + topic TEXT NOT NULL +); + +CREATE INDEX IF NOT EXISTS topics_event_id ON topics(event_id); +CREATE INDEX IF NOT EXISTS topics_room_id ON topics(room_id); + +CREATE TABLE IF NOT EXISTS room_names( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + name TEXT NOT NULL +); + +CREATE INDEX IF NOT EXISTS room_names_event_id ON room_names(event_id); +CREATE INDEX IF NOT EXISTS room_names_room_id ON room_names(room_id); + +CREATE TABLE IF NOT EXISTS rooms( + room_id TEXT PRIMARY KEY NOT NULL, + is_public INTEGER, + creator TEXT +); + +CREATE TABLE IF NOT EXISTS room_hosts( + room_id TEXT NOT NULL, + host TEXT NOT NULL, + CONSTRAINT room_hosts_uniq UNIQUE (room_id, host) ON CONFLICT IGNORE +); + +CREATE INDEX IF NOT EXISTS room_hosts_room_id ON room_hosts (room_id); diff 
--git a/synapse/storage/schema/current/11/keys.sql b/synapse/storage/schema/current/11/keys.sql
new file mode 100644
index 0000000000..a9e0a4fe0d
--- /dev/null
+++ b/synapse/storage/schema/current/11/keys.sql
@@ -0,0 +1,31 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS server_tls_certificates(
+  server_name TEXT, -- Server name.
+  fingerprint TEXT, -- Certificate fingerprint.
+  from_server TEXT, -- Which key server the certificate was fetched from.
+  ts_added_ms INTEGER, -- When the certificate was added.
+  tls_certificate BLOB, -- DER encoded x509 certificate.
+  CONSTRAINT uniqueness UNIQUE (server_name, fingerprint)
+);
+
+CREATE TABLE IF NOT EXISTS server_signature_keys(
+  server_name TEXT, -- Server name.
+  key_id TEXT, -- Key version.
+  from_server TEXT, -- Which key server the key was fetched from.
+  ts_added_ms INTEGER, -- When the key was added.
+  verify_key BLOB, -- NACL verification key.
+  CONSTRAINT uniqueness UNIQUE (server_name, key_id)
+);
diff --git a/synapse/storage/schema/current/11/media_repository.sql b/synapse/storage/schema/current/11/media_repository.sql
new file mode 100644
index 0000000000..afdf48cbfb
--- /dev/null
+++ b/synapse/storage/schema/current/11/media_repository.sql
@@ -0,0 +1,68 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS local_media_repository (
+    media_id TEXT, -- The id used to refer to the media.
+    media_type TEXT, -- The MIME-type of the media.
+    media_length INTEGER, -- Length of the media in bytes.
+    created_ts INTEGER, -- When the content was uploaded in ms.
+    upload_name TEXT, -- The name the media was uploaded with.
+    user_id TEXT, -- The user who uploaded the file.
+    CONSTRAINT uniqueness UNIQUE (media_id)
+);
+
+CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails (
+    media_id TEXT, -- The id used to refer to the media.
+    thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
+    thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
+    thumbnail_type TEXT, -- The MIME-type of the thumbnail.
+    thumbnail_method TEXT, -- The method used to make the thumbnail.
+    thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
+    CONSTRAINT uniqueness UNIQUE (
+        media_id, thumbnail_width, thumbnail_height, thumbnail_type
+    )
+);
+
+CREATE INDEX IF NOT EXISTS local_media_repository_thumbnails_media_id
+    ON local_media_repository_thumbnails (media_id);
+
+CREATE TABLE IF NOT EXISTS remote_media_cache (
+    media_origin TEXT, -- The remote HS the media came from.
+    media_id TEXT, -- The id used to refer to the media on that server.
+    media_type TEXT, -- The MIME-type of the media.
+    created_ts INTEGER, -- When the content was uploaded in ms.
+    upload_name TEXT, -- The name the media was uploaded with.
+    media_length INTEGER, -- Length of the media in bytes.
+    filesystem_id TEXT, -- The name used to store the media on disk.
+    CONSTRAINT uniqueness UNIQUE (media_origin, media_id)
+);
+
+CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails (
+    media_origin TEXT, -- The remote HS the media came from.
+    media_id TEXT, -- The id used to refer to the media.
+    thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
+    thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
+    thumbnail_method TEXT, -- The method used to make the thumbnail.
+    thumbnail_type TEXT, -- The MIME-type of the thumbnail.
+    thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
+    filesystem_id TEXT, -- The name used to store the media on disk.
+    CONSTRAINT uniqueness UNIQUE (
+        media_origin, media_id, thumbnail_width, thumbnail_height,
+        thumbnail_type
+    )
+);
+
+CREATE INDEX IF NOT EXISTS remote_media_cache_thumbnails_media_id
+    ON remote_media_cache_thumbnails (media_id);
diff --git a/synapse/storage/schema/current/11/presence.sql b/synapse/storage/schema/current/11/presence.sql
new file mode 100644
index 0000000000..f9f8db9697
--- /dev/null
+++ b/synapse/storage/schema/current/11/presence.sql
@@ -0,0 +1,38 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS presence(
+  user_id INTEGER NOT NULL,
+  state INTEGER,
+  status_msg TEXT,
+  mtime INTEGER, -- milliseconds since last state change
+  FOREIGN KEY(user_id) REFERENCES users(id)
+);
+
+-- For each of /my/ users which possibly-remote users are allowed to see their
+-- presence state
+CREATE TABLE IF NOT EXISTS presence_allow_inbound(
+  observed_user_id INTEGER NOT NULL,
+  observer_user_id TEXT, -- a UserID,
+  FOREIGN KEY(observed_user_id) REFERENCES users(id)
+);
+
+-- For each of /my/ users (watcher), which possibly-remote users are they
+-- watching?
+CREATE TABLE IF NOT EXISTS presence_list( + user_id INTEGER NOT NULL, + observed_user_id TEXT, -- a UserID, + accepted BOOLEAN, + FOREIGN KEY(user_id) REFERENCES users(id) +); diff --git a/synapse/storage/schema/current/11/profiles.sql b/synapse/storage/schema/current/11/profiles.sql new file mode 100644 index 0000000000..f06a528b4d --- /dev/null +++ b/synapse/storage/schema/current/11/profiles.sql @@ -0,0 +1,20 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +CREATE TABLE IF NOT EXISTS profiles( + user_id INTEGER NOT NULL, + displayname TEXT, + avatar_url TEXT, + FOREIGN KEY(user_id) REFERENCES users(id) +); diff --git a/synapse/storage/schema/current/11/redactions.sql b/synapse/storage/schema/current/11/redactions.sql new file mode 100644 index 0000000000..5011d95db8 --- /dev/null +++ b/synapse/storage/schema/current/11/redactions.sql @@ -0,0 +1,22 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +CREATE TABLE IF NOT EXISTS redactions ( + event_id TEXT NOT NULL, + redacts TEXT NOT NULL, + CONSTRAINT ev_uniq UNIQUE (event_id) +); + +CREATE INDEX IF NOT EXISTS redactions_event_id ON redactions (event_id); +CREATE INDEX IF NOT EXISTS redactions_redacts ON redactions (redacts); diff --git a/synapse/storage/schema/current/11/room_aliases.sql b/synapse/storage/schema/current/11/room_aliases.sql new file mode 100644 index 0000000000..0d2df01603 --- /dev/null +++ b/synapse/storage/schema/current/11/room_aliases.sql @@ -0,0 +1,27 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE IF NOT EXISTS room_aliases( + room_alias TEXT NOT NULL, + room_id TEXT NOT NULL +); + +CREATE TABLE IF NOT EXISTS room_alias_servers( + room_alias TEXT NOT NULL, + server TEXT NOT NULL +); + + + diff --git a/synapse/storage/schema/current/11/state.sql b/synapse/storage/schema/current/11/state.sql new file mode 100644 index 0000000000..1fe8f1e430 --- /dev/null +++ b/synapse/storage/schema/current/11/state.sql @@ -0,0 +1,47 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS state_groups( + id INTEGER PRIMARY KEY, + room_id TEXT NOT NULL, + event_id TEXT NOT NULL +); + +CREATE TABLE IF NOT EXISTS state_groups_state( + state_group INTEGER NOT NULL, + room_id TEXT NOT NULL, + type TEXT NOT NULL, + state_key TEXT NOT NULL, + event_id TEXT NOT NULL +); + +CREATE TABLE IF NOT EXISTS event_to_state_groups( + event_id TEXT NOT NULL, + state_group INTEGER NOT NULL, + CONSTRAINT event_to_state_groups_uniq UNIQUE (event_id) +); + +CREATE INDEX IF NOT EXISTS state_groups_id ON state_groups(id); + +CREATE INDEX IF NOT EXISTS state_groups_state_id ON state_groups_state( + state_group +); +CREATE INDEX IF NOT EXISTS state_groups_state_tuple ON state_groups_state( + room_id, type, state_key +); + +CREATE INDEX IF NOT EXISTS event_to_state_groups_id ON event_to_state_groups( + event_id +); \ No newline at end of file diff --git a/synapse/storage/schema/current/11/transactions.sql b/synapse/storage/schema/current/11/transactions.sql new file mode 100644 index 0000000000..2d30f99b06 --- /dev/null +++ b/synapse/storage/schema/current/11/transactions.sql @@ -0,0 +1,68 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+-- Stores what transaction ids we have received and what our response was
+CREATE TABLE IF NOT EXISTS received_transactions(
+    transaction_id TEXT,
+    origin TEXT,
+    ts INTEGER,
+    response_code INTEGER,
+    response_json TEXT,
+    has_been_referenced BOOL default 0, -- Whether this has been referenced by a prev_tx
+    CONSTRAINT uniquesss UNIQUE (transaction_id, origin) ON CONFLICT REPLACE
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS transactions_txid ON received_transactions(transaction_id, origin);
+CREATE INDEX IF NOT EXISTS transactions_have_ref ON received_transactions(origin, has_been_referenced); -- WHERE has_been_referenced = 0;
+
+
+-- Stores what transactions we've sent, what their response was (if we got one) and whether we have
+-- since referenced the transaction in another outgoing transaction
+CREATE TABLE IF NOT EXISTS sent_transactions(
+    id INTEGER PRIMARY KEY AUTOINCREMENT, -- This is used to apply insertion ordering
+    transaction_id TEXT,
+    destination TEXT,
+    response_code INTEGER DEFAULT 0,
+    response_json TEXT,
+    ts INTEGER
+);
+
+CREATE INDEX IF NOT EXISTS sent_transaction_dest ON sent_transactions(destination);
+CREATE INDEX IF NOT EXISTS sent_transaction_dest_referenced ON sent_transactions(
+    destination
+);
+CREATE INDEX IF NOT EXISTS sent_transaction_txn_id ON sent_transactions(transaction_id);
+-- So that we can do an efficient look up of all transactions that have yet to be successfully
+-- sent.
+CREATE INDEX IF NOT EXISTS sent_transaction_sent ON sent_transactions(response_code);
+
+
+-- For sent transactions only.
+CREATE TABLE IF NOT EXISTS transaction_id_to_pdu(
+    transaction_id INTEGER,
+    destination TEXT,
+    pdu_id TEXT,
+    pdu_origin TEXT
+);
+
+CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_tx ON transaction_id_to_pdu(transaction_id, destination);
+CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_dest ON transaction_id_to_pdu(destination);
+CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_index ON transaction_id_to_pdu(transaction_id, destination);
+
+-- To track destination health
+CREATE TABLE IF NOT EXISTS destinations(
+    destination TEXT PRIMARY KEY,
+    retry_last_ts INTEGER,
+    retry_interval INTEGER
+);
diff --git a/synapse/storage/schema/current/11/users.sql b/synapse/storage/schema/current/11/users.sql
new file mode 100644
index 0000000000..08ccfdac0a
--- /dev/null
+++ b/synapse/storage/schema/current/11/users.sql
@@ -0,0 +1,45 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +CREATE TABLE IF NOT EXISTS users( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT, + password_hash TEXT, + creation_ts INTEGER, + admin BOOL DEFAULT 0 NOT NULL, + UNIQUE(name) ON CONFLICT ROLLBACK +); + +CREATE TABLE IF NOT EXISTS access_tokens( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + device_id TEXT, + token TEXT NOT NULL, + last_used INTEGER, + FOREIGN KEY(user_id) REFERENCES users(id), + UNIQUE(token) ON CONFLICT ROLLBACK +); + +CREATE TABLE IF NOT EXISTS user_ips ( + user TEXT NOT NULL, + access_token TEXT NOT NULL, + device_id TEXT, + ip TEXT NOT NULL, + user_agent TEXT NOT NULL, + last_seen INTEGER NOT NULL, + CONSTRAINT user_ip UNIQUE (user, access_token, ip, user_agent) ON CONFLICT REPLACE +); + +CREATE INDEX IF NOT EXISTS user_ips_user ON user_ips(user); + diff --git a/synapse/storage/schema/delta/11/v11.sql b/synapse/storage/schema/delta/11/v11.sql new file mode 100644 index 0000000000..313592221b --- /dev/null +++ b/synapse/storage/schema/delta/11/v11.sql @@ -0,0 +1,16 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE INDEX IF NOT EXISTS sent_transaction_txn_id ON sent_transactions(transaction_id); \ No newline at end of file diff --git a/synapse/storage/schema/delta/12/v12.sql b/synapse/storage/schema/delta/12/v12.sql new file mode 100644 index 0000000000..b87ef1fe79 --- /dev/null +++ b/synapse/storage/schema/delta/12/v12.sql @@ -0,0 +1,67 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE IF NOT EXISTS rejections( + event_id TEXT NOT NULL, + reason TEXT NOT NULL, + last_check TEXT NOT NULL, + CONSTRAINT ev_id UNIQUE (event_id) ON CONFLICT REPLACE +); + +-- Push notification endpoints that users have configured +CREATE TABLE IF NOT EXISTS pushers ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + profile_tag varchar(32) NOT NULL, + kind varchar(8) NOT NULL, + app_id varchar(64) NOT NULL, + app_display_name varchar(64) NOT NULL, + device_display_name varchar(128) NOT NULL, + pushkey blob NOT NULL, + ts BIGINT NOT NULL, + lang varchar(8), + data blob, + last_token TEXT, + last_success BIGINT, + failing_since BIGINT, + FOREIGN KEY(user_name) REFERENCES users(name), + UNIQUE (app_id, pushkey) +); + +CREATE TABLE IF NOT EXISTS push_rules ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + rule_id TEXT NOT NULL, + priority_class TINYINT NOT NULL, + priority INTEGER NOT NULL DEFAULT 0, + conditions TEXT NOT NULL, + actions TEXT NOT NULL, + UNIQUE(user_name, rule_id) +); + +CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name); + +CREATE TABLE IF NOT EXISTS user_filters( + user_id TEXT, + filter_id INTEGER, + filter_json TEXT, + FOREIGN KEY(user_id) REFERENCES users(id) +); + +CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters( + user_id, filter_id +); + +PRAGMA user_version = 12; diff --git a/synapse/storage/schema/delta/13/v13.sql b/synapse/storage/schema/delta/13/v13.sql new file mode 100644 index 0000000000..e491ad5aec --- /dev/null +++ b/synapse/storage/schema/delta/13/v13.sql @@ -0,0 +1,34 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS application_services( + id INTEGER PRIMARY KEY AUTOINCREMENT, + url TEXT, + token TEXT, + hs_token TEXT, + sender TEXT, + UNIQUE(token) ON CONFLICT ROLLBACK +); + +CREATE TABLE IF NOT EXISTS application_services_regex( + id INTEGER PRIMARY KEY AUTOINCREMENT, + as_id INTEGER NOT NULL, + namespace INTEGER, /* enum[room_id|room_alias|user_id] */ + regex TEXT, + FOREIGN KEY(as_id) REFERENCES application_services(id) +); + + + diff --git a/synapse/storage/schema/delta/14/upgrade_appservice_db.py b/synapse/storage/schema/delta/14/upgrade_appservice_db.py new file mode 100644 index 0000000000..55e43c41ab --- /dev/null +++ b/synapse/storage/schema/delta/14/upgrade_appservice_db.py @@ -0,0 +1,20 @@ +import json + + +def run_upgrade(cur): + cur.execute("SELECT id, regex FROM application_services_regex") + for row in cur.fetchall(): + try: + print "checking %s..." % row[0] + json.loads(row[1]) + except ValueError: + # row isn't in json, make it so. + string_regex = row[1] + new_regex = json.dumps({ + "regex": string_regex, + "exclusive": True + }) + cur.execute( + "UPDATE application_services_regex SET regex=? 
WHERE id=?", + (new_regex, row[0]) + ) diff --git a/synapse/storage/schema/delta/v11.sql b/synapse/storage/schema/delta/v11.sql deleted file mode 100644 index 313592221b..0000000000 --- a/synapse/storage/schema/delta/v11.sql +++ /dev/null @@ -1,16 +0,0 @@ -/* Copyright 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE INDEX IF NOT EXISTS sent_transaction_txn_id ON sent_transactions(transaction_id); \ No newline at end of file diff --git a/synapse/storage/schema/delta/v12.sql b/synapse/storage/schema/delta/v12.sql deleted file mode 100644 index b87ef1fe79..0000000000 --- a/synapse/storage/schema/delta/v12.sql +++ /dev/null @@ -1,67 +0,0 @@ -/* Copyright 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -CREATE TABLE IF NOT EXISTS rejections( - event_id TEXT NOT NULL, - reason TEXT NOT NULL, - last_check TEXT NOT NULL, - CONSTRAINT ev_id UNIQUE (event_id) ON CONFLICT REPLACE -); - --- Push notification endpoints that users have configured -CREATE TABLE IF NOT EXISTS pushers ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_name TEXT NOT NULL, - profile_tag varchar(32) NOT NULL, - kind varchar(8) NOT NULL, - app_id varchar(64) NOT NULL, - app_display_name varchar(64) NOT NULL, - device_display_name varchar(128) NOT NULL, - pushkey blob NOT NULL, - ts BIGINT NOT NULL, - lang varchar(8), - data blob, - last_token TEXT, - last_success BIGINT, - failing_since BIGINT, - FOREIGN KEY(user_name) REFERENCES users(name), - UNIQUE (app_id, pushkey) -); - -CREATE TABLE IF NOT EXISTS push_rules ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_name TEXT NOT NULL, - rule_id TEXT NOT NULL, - priority_class TINYINT NOT NULL, - priority INTEGER NOT NULL DEFAULT 0, - conditions TEXT NOT NULL, - actions TEXT NOT NULL, - UNIQUE(user_name, rule_id) -); - -CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name); - -CREATE TABLE IF NOT EXISTS user_filters( - user_id TEXT, - filter_id INTEGER, - filter_json TEXT, - FOREIGN KEY(user_id) REFERENCES users(id) -); - -CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters( - user_id, filter_id -); - -PRAGMA user_version = 12; diff --git a/synapse/storage/schema/delta/v13.sql b/synapse/storage/schema/delta/v13.sql deleted file mode 100644 index e491ad5aec..0000000000 --- a/synapse/storage/schema/delta/v13.sql +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS application_services( - id INTEGER PRIMARY KEY AUTOINCREMENT, - url TEXT, - token TEXT, - hs_token TEXT, - sender TEXT, - UNIQUE(token) ON CONFLICT ROLLBACK -); - -CREATE TABLE IF NOT EXISTS application_services_regex( - id INTEGER PRIMARY KEY AUTOINCREMENT, - as_id INTEGER NOT NULL, - namespace INTEGER, /* enum[room_id|room_alias|user_id] */ - regex TEXT, - FOREIGN KEY(as_id) REFERENCES application_services(id) -); - - - diff --git a/synapse/storage/schema/delta/v2.sql b/synapse/storage/schema/delta/v2.sql deleted file mode 100644 index f740f6dd5d..0000000000 --- a/synapse/storage/schema/delta/v2.sql +++ /dev/null @@ -1,168 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -CREATE TABLE IF NOT EXISTS events( - stream_ordering INTEGER PRIMARY KEY AUTOINCREMENT, - topological_ordering INTEGER NOT NULL, - event_id TEXT NOT NULL, - type TEXT NOT NULL, - room_id TEXT NOT NULL, - content TEXT NOT NULL, - unrecognized_keys TEXT, - processed BOOL NOT NULL, - outlier BOOL NOT NULL, - CONSTRAINT ev_uniq UNIQUE (event_id) -); - -CREATE INDEX IF NOT EXISTS events_event_id ON events (event_id); -CREATE INDEX IF NOT EXISTS events_stream_ordering ON events (stream_ordering); -CREATE INDEX IF NOT EXISTS events_topological_ordering ON events (topological_ordering); -CREATE INDEX IF NOT EXISTS events_room_id ON events (room_id); - -CREATE TABLE IF NOT EXISTS state_events( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - prev_state TEXT -); - -CREATE UNIQUE INDEX IF NOT EXISTS state_events_event_id ON state_events (event_id); -CREATE INDEX IF NOT EXISTS state_events_room_id ON state_events (room_id); -CREATE INDEX IF NOT EXISTS state_events_type ON state_events (type); -CREATE INDEX IF NOT EXISTS state_events_state_key ON state_events (state_key); - - -CREATE TABLE IF NOT EXISTS current_state_events( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - CONSTRAINT curr_uniq UNIQUE (room_id, type, state_key) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS curr_events_event_id ON current_state_events (event_id); -CREATE INDEX IF NOT EXISTS current_state_events_room_id ON current_state_events (room_id); -CREATE INDEX IF NOT EXISTS current_state_events_type ON current_state_events (type); -CREATE INDEX IF NOT EXISTS current_state_events_state_key ON current_state_events (state_key); - -CREATE TABLE IF NOT EXISTS room_memberships( - event_id TEXT NOT NULL, - user_id TEXT NOT NULL, - sender TEXT NOT NULL, - room_id TEXT NOT NULL, - membership TEXT NOT NULL -); - -CREATE INDEX IF NOT EXISTS room_memberships_event_id ON room_memberships (event_id); -CREATE INDEX IF NOT EXISTS room_memberships_room_id ON room_memberships (room_id); -CREATE INDEX IF NOT EXISTS room_memberships_user_id ON room_memberships (user_id); - -CREATE TABLE IF NOT EXISTS feedback( - event_id TEXT NOT NULL, - feedback_type TEXT, - target_event_id TEXT, - sender TEXT, - room_id TEXT -); - -CREATE TABLE IF NOT EXISTS topics( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - topic TEXT NOT NULL -); - -CREATE TABLE IF NOT EXISTS room_names( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - name TEXT NOT NULL -); - -CREATE TABLE IF NOT EXISTS rooms( - room_id TEXT PRIMARY KEY NOT NULL, - is_public INTEGER, - creator TEXT -); - -CREATE TABLE IF NOT EXISTS room_join_rules( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - join_rule TEXT NOT NULL -); -CREATE INDEX IF NOT EXISTS room_join_rules_event_id ON room_join_rules(event_id); -CREATE INDEX IF NOT EXISTS room_join_rules_room_id ON room_join_rules(room_id); - - -CREATE TABLE IF NOT EXISTS room_power_levels( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - user_id TEXT NOT NULL, - level INTEGER NOT NULL -); -CREATE INDEX IF NOT EXISTS room_power_levels_event_id ON room_power_levels(event_id); -CREATE INDEX IF NOT EXISTS room_power_levels_room_id ON room_power_levels(room_id); -CREATE INDEX IF NOT EXISTS room_power_levels_room_user ON room_power_levels(room_id, user_id); - - -CREATE TABLE IF NOT EXISTS room_default_levels( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - level INTEGER NOT NULL -); - -CREATE INDEX IF NOT EXISTS 
room_default_levels_event_id ON room_default_levels(event_id); -CREATE INDEX IF NOT EXISTS room_default_levels_room_id ON room_default_levels(room_id); - - -CREATE TABLE IF NOT EXISTS room_add_state_levels( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - level INTEGER NOT NULL -); - -CREATE INDEX IF NOT EXISTS room_add_state_levels_event_id ON room_add_state_levels(event_id); -CREATE INDEX IF NOT EXISTS room_add_state_levels_room_id ON room_add_state_levels(room_id); - - -CREATE TABLE IF NOT EXISTS room_send_event_levels( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - level INTEGER NOT NULL -); - -CREATE INDEX IF NOT EXISTS room_send_event_levels_event_id ON room_send_event_levels(event_id); -CREATE INDEX IF NOT EXISTS room_send_event_levels_room_id ON room_send_event_levels(room_id); - - -CREATE TABLE IF NOT EXISTS room_ops_levels( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - ban_level INTEGER, - kick_level INTEGER -); - -CREATE INDEX IF NOT EXISTS room_ops_levels_event_id ON room_ops_levels(event_id); -CREATE INDEX IF NOT EXISTS room_ops_levels_room_id ON room_ops_levels(room_id); - - -CREATE TABLE IF NOT EXISTS room_hosts( - room_id TEXT NOT NULL, - host TEXT NOT NULL, - CONSTRAINT room_hosts_uniq UNIQUE (room_id, host) ON CONFLICT IGNORE -); - -CREATE INDEX IF NOT EXISTS room_hosts_room_id ON room_hosts (room_id); - -PRAGMA user_version = 2; diff --git a/synapse/storage/schema/delta/v3.sql b/synapse/storage/schema/delta/v3.sql deleted file mode 100644 index c67e38ff52..0000000000 --- a/synapse/storage/schema/delta/v3.sql +++ /dev/null @@ -1,27 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -CREATE INDEX IF NOT EXISTS room_aliases_alias ON room_aliases(room_alias); -CREATE INDEX IF NOT EXISTS room_aliases_id ON room_aliases(room_id); - - -CREATE INDEX IF NOT EXISTS room_alias_servers_alias ON room_alias_servers(room_alias); - -DELETE FROM room_aliases WHERE rowid NOT IN (SELECT max(rowid) FROM room_aliases GROUP BY room_alias, room_id); - -CREATE UNIQUE INDEX IF NOT EXISTS room_aliases_uniq ON room_aliases(room_alias, room_id); - -PRAGMA user_version = 3; diff --git a/synapse/storage/schema/delta/v4.sql b/synapse/storage/schema/delta/v4.sql deleted file mode 100644 index d3807b7686..0000000000 --- a/synapse/storage/schema/delta/v4.sql +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -CREATE TABLE IF NOT EXISTS redactions ( - event_id TEXT NOT NULL, - redacts TEXT NOT NULL, - CONSTRAINT ev_uniq UNIQUE (event_id) -); - -CREATE INDEX IF NOT EXISTS redactions_event_id ON redactions (event_id); -CREATE INDEX IF NOT EXISTS redactions_redacts ON redactions (redacts); - -ALTER TABLE room_ops_levels ADD COLUMN redact_level INTEGER; - -PRAGMA user_version = 4; diff --git a/synapse/storage/schema/delta/v5.sql b/synapse/storage/schema/delta/v5.sql deleted file mode 100644 index 0874a15431..0000000000 --- a/synapse/storage/schema/delta/v5.sql +++ /dev/null @@ -1,30 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS user_ips ( - user TEXT NOT NULL, - access_token TEXT NOT NULL, - device_id TEXT, - ip TEXT NOT NULL, - user_agent TEXT NOT NULL, - last_seen INTEGER NOT NULL, - CONSTRAINT user_ip UNIQUE (user, access_token, ip, user_agent) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS user_ips_user ON user_ips(user); - -ALTER TABLE users ADD COLUMN admin BOOL DEFAULT 0 NOT NULL; - -PRAGMA user_version = 5; diff --git a/synapse/storage/schema/delta/v6.sql b/synapse/storage/schema/delta/v6.sql deleted file mode 100644 index a9e0a4fe0d..0000000000 --- a/synapse/storage/schema/delta/v6.sql +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS server_tls_certificates( - server_name TEXT, -- Server name. - fingerprint TEXT, -- Certificate fingerprint. - from_server TEXT, -- Which key server the certificate was fetched from. - ts_added_ms INTEGER, -- When the certificate was added. - tls_certificate BLOB, -- DER encoded x509 certificate. - CONSTRAINT uniqueness UNIQUE (server_name, fingerprint) -); - -CREATE TABLE IF NOT EXISTS server_signature_keys( - server_name TEXT, -- Server name. - key_id TEXT, -- Key version. - from_server TEXT, -- Which key server the key was fetched from. - ts_added_ms INTEGER, -- When the key was added. - verify_key BLOB, -- NACL verification key. 
- CONSTRAINT uniqueness UNIQUE (server_name, key_id) -); diff --git a/synapse/storage/schema/delta/v8.sql b/synapse/storage/schema/delta/v8.sql deleted file mode 100644 index 1e9f8b18cb..0000000000 --- a/synapse/storage/schema/delta/v8.sql +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - CREATE TABLE IF NOT EXISTS event_signatures_2 ( - event_id TEXT, - signature_name TEXT, - key_id TEXT, - signature BLOB, - CONSTRAINT uniqueness UNIQUE (event_id, signature_name, key_id) -); - -INSERT INTO event_signatures_2 (event_id, signature_name, key_id, signature) -SELECT event_id, signature_name, key_id, signature FROM event_signatures; - -DROP TABLE event_signatures; -ALTER TABLE event_signatures_2 RENAME TO event_signatures; - -CREATE INDEX IF NOT EXISTS event_signatures_id ON event_signatures ( - event_id -); - -PRAGMA user_version = 8; \ No newline at end of file diff --git a/synapse/storage/schema/delta/v9.sql b/synapse/storage/schema/delta/v9.sql deleted file mode 100644 index 455d51a70c..0000000000 --- a/synapse/storage/schema/delta/v9.sql +++ /dev/null @@ -1,79 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - --- To track destination health -CREATE TABLE IF NOT EXISTS destinations( - destination TEXT PRIMARY KEY, - retry_last_ts INTEGER, - retry_interval INTEGER -); - - -CREATE TABLE IF NOT EXISTS local_media_repository ( - media_id TEXT, -- The id used to refer to the media. - media_type TEXT, -- The MIME-type of the media. - media_length INTEGER, -- Length of the media in bytes. - created_ts INTEGER, -- When the content was uploaded in ms. - upload_name TEXT, -- The name the media was uploaded with. - user_id TEXT, -- The user who uploaded the file. - CONSTRAINT uniqueness UNIQUE (media_id) -); - -CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails ( - media_id TEXT, -- The id used to refer to the media. - thumbnail_width INTEGER, -- The width of the thumbnail in pixels. - thumbnail_height INTEGER, -- The height of the thumbnail in pixels. - thumbnail_type TEXT, -- The MIME-type of the thumbnail. - thumbnail_method TEXT, -- The method used to make the thumbnail. - thumbnail_length INTEGER, -- The length of the thumbnail in bytes. 
- CONSTRAINT uniqueness UNIQUE ( - media_id, thumbnail_width, thumbnail_height, thumbnail_type - ) -); - -CREATE INDEX IF NOT EXISTS local_media_repository_thumbnails_media_id - ON local_media_repository_thumbnails (media_id); - -CREATE TABLE IF NOT EXISTS remote_media_cache ( - media_origin TEXT, -- The remote HS the media came from. - media_id TEXT, -- The id used to refer to the media on that server. - media_type TEXT, -- The MIME-type of the media. - created_ts INTEGER, -- When the content was uploaded in ms. - upload_name TEXT, -- The name the media was uploaded with. - media_length INTEGER, -- Length of the media in bytes. - filesystem_id TEXT, -- The name used to store the media on disk. - CONSTRAINT uniqueness UNIQUE (media_origin, media_id) -); - -CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails ( - media_origin TEXT, -- The remote HS the media came from. - media_id TEXT, -- The id used to refer to the media. - thumbnail_width INTEGER, -- The width of the thumbnail in pixels. - thumbnail_height INTEGER, -- The height of the thumbnail in pixels. - thumbnail_method TEXT, -- The method used to make the thumbnail. - thumbnail_type TEXT, -- The MIME-type of the thumbnail. - thumbnail_length INTEGER, -- The length of the thumbnail in bytes. - filesystem_id TEXT, -- The name used to store the media on disk. - CONSTRAINT uniqueness UNIQUE ( - media_origin, media_id, thumbnail_width, thumbnail_height, - thumbnail_type - ) -); - -CREATE INDEX IF NOT EXISTS remote_media_cache_thumbnails_media_id - ON remote_media_cache_thumbnails (media_id); - - -PRAGMA user_version = 9; diff --git a/synapse/storage/schema/event_edges.sql b/synapse/storage/schema/event_edges.sql deleted file mode 100644 index 1e766d6db2..0000000000 --- a/synapse/storage/schema/event_edges.sql +++ /dev/null @@ -1,89 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -CREATE TABLE IF NOT EXISTS event_forward_extremities( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS ev_extrem_room ON event_forward_extremities(room_id); -CREATE INDEX IF NOT EXISTS ev_extrem_id ON event_forward_extremities(event_id); - - -CREATE TABLE IF NOT EXISTS event_backward_extremities( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS ev_b_extrem_room ON event_backward_extremities(room_id); -CREATE INDEX IF NOT EXISTS ev_b_extrem_id ON event_backward_extremities(event_id); - - -CREATE TABLE IF NOT EXISTS event_edges( - event_id TEXT NOT NULL, - prev_event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - is_state INTEGER NOT NULL, - CONSTRAINT uniqueness UNIQUE (event_id, prev_event_id, room_id, is_state) -); - -CREATE INDEX IF NOT EXISTS ev_edges_id ON event_edges(event_id); -CREATE INDEX IF NOT EXISTS ev_edges_prev_id ON event_edges(prev_event_id); - - -CREATE TABLE IF NOT EXISTS room_depth( - room_id TEXT NOT NULL, - min_depth INTEGER NOT NULL, - CONSTRAINT uniqueness UNIQUE (room_id) -); - -CREATE INDEX IF NOT EXISTS room_depth_room ON room_depth(room_id); - - -create TABLE IF NOT EXISTS event_destinations( - event_id TEXT NOT NULL, - destination TEXT NOT NULL, - delivered_ts INTEGER DEFAULT 0, -- or 0 if not delivered - CONSTRAINT uniqueness UNIQUE (event_id, destination) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS event_destinations_id ON event_destinations(event_id); - - -CREATE TABLE IF NOT EXISTS state_forward_extremities( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS st_extrem_keys ON state_forward_extremities( - room_id, type, state_key -); -CREATE INDEX IF NOT EXISTS st_extrem_id ON state_forward_extremities(event_id); - - -CREATE TABLE IF NOT EXISTS event_auth( - event_id TEXT NOT NULL, - auth_id TEXT NOT NULL, - room_id TEXT NOT NULL, - CONSTRAINT uniqueness UNIQUE (event_id, auth_id, room_id) -); - -CREATE INDEX IF NOT EXISTS evauth_edges_id ON event_auth(event_id); -CREATE INDEX IF NOT EXISTS evauth_edges_auth_id ON event_auth(auth_id); \ No newline at end of file diff --git a/synapse/storage/schema/event_signatures.sql b/synapse/storage/schema/event_signatures.sql deleted file mode 100644 index c28c39c48a..0000000000 --- a/synapse/storage/schema/event_signatures.sql +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -CREATE TABLE IF NOT EXISTS event_content_hashes ( - event_id TEXT, - algorithm TEXT, - hash BLOB, - CONSTRAINT uniqueness UNIQUE (event_id, algorithm) -); - -CREATE INDEX IF NOT EXISTS event_content_hashes_id ON event_content_hashes( - event_id -); - - -CREATE TABLE IF NOT EXISTS event_reference_hashes ( - event_id TEXT, - algorithm TEXT, - hash BLOB, - CONSTRAINT uniqueness UNIQUE (event_id, algorithm) -); - -CREATE INDEX IF NOT EXISTS event_reference_hashes_id ON event_reference_hashes ( - event_id -); - - -CREATE TABLE IF NOT EXISTS event_signatures ( - event_id TEXT, - signature_name TEXT, - key_id TEXT, - signature BLOB, - CONSTRAINT uniqueness UNIQUE (event_id, signature_name, key_id) -); - -CREATE INDEX IF NOT EXISTS event_signatures_id ON event_signatures ( - event_id -); - - -CREATE TABLE IF NOT EXISTS event_edge_hashes( - event_id TEXT, - prev_event_id TEXT, - algorithm TEXT, - hash BLOB, - CONSTRAINT uniqueness UNIQUE ( - event_id, prev_event_id, algorithm - ) -); - -CREATE INDEX IF NOT EXISTS event_edge_hashes_id ON event_edge_hashes( - event_id -); diff --git a/synapse/storage/schema/filtering.sql b/synapse/storage/schema/filtering.sql deleted file mode 100644 index beb39ca201..0000000000 --- a/synapse/storage/schema/filtering.sql +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS user_filters( - user_id TEXT, - filter_id INTEGER, - filter_json TEXT, - FOREIGN KEY(user_id) REFERENCES users(id) -); - -CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters( - user_id, filter_id -); diff --git a/synapse/storage/schema/im.sql b/synapse/storage/schema/im.sql deleted file mode 100644 index dd00c1cd2f..0000000000 --- a/synapse/storage/schema/im.sql +++ /dev/null @@ -1,125 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -CREATE TABLE IF NOT EXISTS events( - stream_ordering INTEGER PRIMARY KEY AUTOINCREMENT, - topological_ordering INTEGER NOT NULL, - event_id TEXT NOT NULL, - type TEXT NOT NULL, - room_id TEXT NOT NULL, - content TEXT NOT NULL, - unrecognized_keys TEXT, - processed BOOL NOT NULL, - outlier BOOL NOT NULL, - depth INTEGER DEFAULT 0 NOT NULL, - CONSTRAINT ev_uniq UNIQUE (event_id) -); - -CREATE INDEX IF NOT EXISTS events_event_id ON events (event_id); -CREATE INDEX IF NOT EXISTS events_stream_ordering ON events (stream_ordering); -CREATE INDEX IF NOT EXISTS events_topological_ordering ON events (topological_ordering); -CREATE INDEX IF NOT EXISTS events_room_id ON events (room_id); - - -CREATE TABLE IF NOT EXISTS event_json( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - internal_metadata NOT NULL, - json BLOB NOT NULL, - CONSTRAINT ev_j_uniq UNIQUE (event_id) -); - -CREATE INDEX IF NOT EXISTS event_json_id ON event_json(event_id); -CREATE INDEX IF NOT EXISTS event_json_room_id ON event_json(room_id); - - -CREATE TABLE IF NOT EXISTS state_events( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - prev_state TEXT -); - -CREATE UNIQUE INDEX IF NOT EXISTS state_events_event_id ON state_events (event_id); -CREATE INDEX IF NOT EXISTS state_events_room_id ON state_events (room_id); -CREATE INDEX IF NOT EXISTS state_events_type ON state_events (type); -CREATE INDEX IF NOT EXISTS state_events_state_key ON state_events (state_key); - - -CREATE TABLE IF NOT EXISTS current_state_events( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - CONSTRAINT curr_uniq UNIQUE (room_id, type, state_key) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS curr_events_event_id ON current_state_events (event_id); -CREATE INDEX IF NOT EXISTS current_state_events_room_id ON current_state_events (room_id); -CREATE INDEX IF NOT EXISTS current_state_events_type ON current_state_events (type); -CREATE INDEX IF NOT EXISTS current_state_events_state_key ON current_state_events (state_key); - -CREATE TABLE IF NOT EXISTS room_memberships( - event_id TEXT NOT NULL, - user_id TEXT NOT NULL, - sender TEXT NOT NULL, - room_id TEXT NOT NULL, - membership TEXT NOT NULL -); - -CREATE INDEX IF NOT EXISTS room_memberships_event_id ON room_memberships (event_id); -CREATE INDEX IF NOT EXISTS room_memberships_room_id ON room_memberships (room_id); -CREATE INDEX IF NOT EXISTS room_memberships_user_id ON room_memberships (user_id); - -CREATE TABLE IF NOT EXISTS feedback( - event_id TEXT NOT NULL, - feedback_type TEXT, - target_event_id TEXT, - sender TEXT, - room_id TEXT -); - -CREATE TABLE IF NOT EXISTS topics( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - topic TEXT NOT NULL -); - -CREATE INDEX IF NOT EXISTS topics_event_id ON topics(event_id); -CREATE INDEX IF NOT EXISTS topics_room_id ON topics(room_id); - -CREATE TABLE IF NOT EXISTS room_names( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - name TEXT NOT NULL -); - -CREATE INDEX IF NOT EXISTS room_names_event_id ON room_names(event_id); -CREATE INDEX IF NOT EXISTS room_names_room_id ON room_names(room_id); - -CREATE TABLE IF NOT EXISTS rooms( - room_id TEXT PRIMARY KEY NOT NULL, - is_public INTEGER, - creator TEXT -); - -CREATE TABLE IF NOT EXISTS room_hosts( - room_id TEXT NOT NULL, - host TEXT NOT NULL, - CONSTRAINT room_hosts_uniq UNIQUE (room_id, host) ON CONFLICT IGNORE -); - -CREATE INDEX IF NOT EXISTS room_hosts_room_id ON room_hosts (room_id); diff 
--git a/synapse/storage/schema/keys.sql b/synapse/storage/schema/keys.sql deleted file mode 100644 index a9e0a4fe0d..0000000000 --- a/synapse/storage/schema/keys.sql +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS server_tls_certificates( - server_name TEXT, -- Server name. - fingerprint TEXT, -- Certificate fingerprint. - from_server TEXT, -- Which key server the certificate was fetched from. - ts_added_ms INTEGER, -- When the certificate was added. - tls_certificate BLOB, -- DER encoded x509 certificate. - CONSTRAINT uniqueness UNIQUE (server_name, fingerprint) -); - -CREATE TABLE IF NOT EXISTS server_signature_keys( - server_name TEXT, -- Server name. - key_id TEXT, -- Key version. - from_server TEXT, -- Which key server the key was fetched from. - ts_added_ms INTEGER, -- When the key was added. - verify_key BLOB, -- NACL verification key. - CONSTRAINT uniqueness UNIQUE (server_name, key_id) -); diff --git a/synapse/storage/schema/media_repository.sql b/synapse/storage/schema/media_repository.sql deleted file mode 100644 index afdf48cbfb..0000000000 --- a/synapse/storage/schema/media_repository.sql +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS local_media_repository ( - media_id TEXT, -- The id used to refer to the media. - media_type TEXT, -- The MIME-type of the media. - media_length INTEGER, -- Length of the media in bytes. - created_ts INTEGER, -- When the content was uploaded in ms. - upload_name TEXT, -- The name the media was uploaded with. - user_id TEXT, -- The user who uploaded the file. - CONSTRAINT uniqueness UNIQUE (media_id) -); - -CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails ( - media_id TEXT, -- The id used to refer to the media. - thumbnail_width INTEGER, -- The width of the thumbnail in pixels. - thumbnail_height INTEGER, -- The height of the thumbnail in pixels. - thumbnail_type TEXT, -- The MIME-type of the thumbnail. - thumbnail_method TEXT, -- The method used to make the thumbnail. - thumbnail_length INTEGER, -- The length of the thumbnail in bytes. 
- CONSTRAINT uniqueness UNIQUE ( - media_id, thumbnail_width, thumbnail_height, thumbnail_type - ) -); - -CREATE INDEX IF NOT EXISTS local_media_repository_thumbnails_media_id - ON local_media_repository_thumbnails (media_id); - -CREATE TABLE IF NOT EXISTS remote_media_cache ( - media_origin TEXT, -- The remote HS the media came from. - media_id TEXT, -- The id used to refer to the media on that server. - media_type TEXT, -- The MIME-type of the media. - created_ts INTEGER, -- When the content was uploaded in ms. - upload_name TEXT, -- The name the media was uploaded with. - media_length INTEGER, -- Length of the media in bytes. - filesystem_id TEXT, -- The name used to store the media on disk. - CONSTRAINT uniqueness UNIQUE (media_origin, media_id) -); - -CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails ( - media_origin TEXT, -- The remote HS the media came from. - media_id TEXT, -- The id used to refer to the media. - thumbnail_width INTEGER, -- The width of the thumbnail in pixels. - thumbnail_height INTEGER, -- The height of the thumbnail in pixels. - thumbnail_method TEXT, -- The method used to make the thumbnail. - thumbnail_type TEXT, -- The MIME-type of the thumbnail. - thumbnail_length INTEGER, -- The length of the thumbnail in bytes. - filesystem_id TEXT, -- The name used to store the media on disk. - CONSTRAINT uniqueness UNIQUE ( - media_origin, media_id, thumbnail_width, thumbnail_height, - thumbnail_type - ) -); - -CREATE INDEX IF NOT EXISTS remote_media_cache_thumbnails_media_id - ON remote_media_cache_thumbnails (media_id); diff --git a/synapse/storage/schema/presence.sql b/synapse/storage/schema/presence.sql deleted file mode 100644 index f9f8db9697..0000000000 --- a/synapse/storage/schema/presence.sql +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS presence( - user_id INTEGER NOT NULL, - state INTEGER, - status_msg TEXT, - mtime INTEGER, -- milliseconds since last state change - FOREIGN KEY(user_id) REFERENCES users(id) -); - --- For each of /my/ users which possibly-remote users are allowed to see their --- presence state -CREATE TABLE IF NOT EXISTS presence_allow_inbound( - observed_user_id INTEGER NOT NULL, - observer_user_id TEXT, -- a UserID, - FOREIGN KEY(observed_user_id) REFERENCES users(id) -); - --- For each of /my/ users (watcher), which possibly-remote users are they --- watching? 
-CREATE TABLE IF NOT EXISTS presence_list( - user_id INTEGER NOT NULL, - observed_user_id TEXT, -- a UserID, - accepted BOOLEAN, - FOREIGN KEY(user_id) REFERENCES users(id) -); diff --git a/synapse/storage/schema/profiles.sql b/synapse/storage/schema/profiles.sql deleted file mode 100644 index f06a528b4d..0000000000 --- a/synapse/storage/schema/profiles.sql +++ /dev/null @@ -1,20 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS profiles( - user_id INTEGER NOT NULL, - displayname TEXT, - avatar_url TEXT, - FOREIGN KEY(user_id) REFERENCES users(id) -); diff --git a/synapse/storage/schema/pusher.sql b/synapse/storage/schema/pusher.sql deleted file mode 100644 index 3735b11547..0000000000 --- a/synapse/storage/schema/pusher.sql +++ /dev/null @@ -1,46 +0,0 @@ -/* Copyright 2014 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ --- Push notification endpoints that users have configured -CREATE TABLE IF NOT EXISTS pushers ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_name TEXT NOT NULL, - profile_tag varchar(32) NOT NULL, - kind varchar(8) NOT NULL, - app_id varchar(64) NOT NULL, - app_display_name varchar(64) NOT NULL, - device_display_name varchar(128) NOT NULL, - pushkey blob NOT NULL, - ts BIGINT NOT NULL, - lang varchar(8), - data blob, - last_token TEXT, - last_success BIGINT, - failing_since BIGINT, - FOREIGN KEY(user_name) REFERENCES users(name), - UNIQUE (app_id, pushkey) -); - -CREATE TABLE IF NOT EXISTS push_rules ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_name TEXT NOT NULL, - rule_id TEXT NOT NULL, - priority_class TINYINT NOT NULL, - priority INTEGER NOT NULL DEFAULT 0, - conditions TEXT NOT NULL, - actions TEXT NOT NULL, - UNIQUE(user_name, rule_id) -); - -CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name); diff --git a/synapse/storage/schema/redactions.sql b/synapse/storage/schema/redactions.sql deleted file mode 100644 index 5011d95db8..0000000000 --- a/synapse/storage/schema/redactions.sql +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS redactions ( - event_id TEXT NOT NULL, - redacts TEXT NOT NULL, - CONSTRAINT ev_uniq UNIQUE (event_id) -); - -CREATE INDEX IF NOT EXISTS redactions_event_id ON redactions (event_id); -CREATE INDEX IF NOT EXISTS redactions_redacts ON redactions (redacts); diff --git a/synapse/storage/schema/rejections.sql b/synapse/storage/schema/rejections.sql deleted file mode 100644 index bd2a8b1bb5..0000000000 --- a/synapse/storage/schema/rejections.sql +++ /dev/null @@ -1,21 +0,0 @@ -/* Copyright 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS rejections( - event_id TEXT NOT NULL, - reason TEXT NOT NULL, - last_check TEXT NOT NULL, - CONSTRAINT ev_id UNIQUE (event_id) ON CONFLICT REPLACE -); diff --git a/synapse/storage/schema/room_aliases.sql b/synapse/storage/schema/room_aliases.sql deleted file mode 100644 index 0d2df01603..0000000000 --- a/synapse/storage/schema/room_aliases.sql +++ /dev/null @@ -1,27 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS room_aliases( - room_alias TEXT NOT NULL, - room_id TEXT NOT NULL -); - -CREATE TABLE IF NOT EXISTS room_alias_servers( - room_alias TEXT NOT NULL, - server TEXT NOT NULL -); - - - diff --git a/synapse/storage/schema/schema_version.sql b/synapse/storage/schema/schema_version.sql new file mode 100644 index 0000000000..83a8c7b7ce --- /dev/null +++ b/synapse/storage/schema/schema_version.sql @@ -0,0 +1,29 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS schema_version( + Lock char(1) NOT NULL DEFAULT 'X', -- Makes sure this table only has one row. + version INTEGER NOT NULL, + upgraded BOOL NOT NULL, -- Whether we reached this version from an upgrade or an initial schema. + CONSTRAINT schema_version_lock CHECK (Lock='X') ON CONFLICT REPLACE +); + +CREATE TABLE IF NOT EXISTS schema_deltas( + version INTEGER NOT NULL, + file TEXT NOT NULL, + CONSTRAINT schema_deltas_ver_file UNIQUE (version, file) ON CONFLICT IGNORE +); + +CREATE INDEX IF NOT EXISTS schema_deltas_ver ON schema_deltas(version); diff --git a/synapse/storage/schema/state.sql b/synapse/storage/schema/state.sql deleted file mode 100644 index 1fe8f1e430..0000000000 --- a/synapse/storage/schema/state.sql +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS state_groups( - id INTEGER PRIMARY KEY, - room_id TEXT NOT NULL, - event_id TEXT NOT NULL -); - -CREATE TABLE IF NOT EXISTS state_groups_state( - state_group INTEGER NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - event_id TEXT NOT NULL -); - -CREATE TABLE IF NOT EXISTS event_to_state_groups( - event_id TEXT NOT NULL, - state_group INTEGER NOT NULL, - CONSTRAINT event_to_state_groups_uniq UNIQUE (event_id) -); - -CREATE INDEX IF NOT EXISTS state_groups_id ON state_groups(id); - -CREATE INDEX IF NOT EXISTS state_groups_state_id ON state_groups_state( - state_group -); -CREATE INDEX IF NOT EXISTS state_groups_state_tuple ON state_groups_state( - room_id, type, state_key -); - -CREATE INDEX IF NOT EXISTS event_to_state_groups_id ON event_to_state_groups( - event_id -); \ No newline at end of file diff --git a/synapse/storage/schema/transactions.sql b/synapse/storage/schema/transactions.sql deleted file mode 100644 index 2d30f99b06..0000000000 --- a/synapse/storage/schema/transactions.sql +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ --- Stores what transaction ids we have received and what our response was -CREATE TABLE IF NOT EXISTS received_transactions( - transaction_id TEXT, - origin TEXT, - ts INTEGER, - response_code INTEGER, - response_json TEXT, - has_been_referenced BOOL default 0, -- Whether this has been referenced by a prev_tx - CONSTRAINT uniquesss UNIQUE (transaction_id, origin) ON CONFLICT REPLACE -); - -CREATE UNIQUE INDEX IF NOT EXISTS transactions_txid ON received_transactions(transaction_id, origin); -CREATE INDEX IF NOT EXISTS transactions_have_ref ON received_transactions(origin, has_been_referenced);-- WHERE has_been_referenced = 0; - - --- Stores what transactions we've sent, what their response was (if we got one) and whether we have --- since referenced the transaction in another outgoing transaction -CREATE TABLE IF NOT EXISTS sent_transactions( - id INTEGER PRIMARY KEY AUTOINCREMENT, -- This is used to apply insertion ordering - transaction_id TEXT, - destination TEXT, - response_code INTEGER DEFAULT 0, - response_json TEXT, - ts INTEGER -); - -CREATE INDEX IF NOT EXISTS sent_transaction_dest ON sent_transactions(destination); -CREATE INDEX IF NOT EXISTS sent_transaction_dest_referenced ON sent_transactions( - destination -); -CREATE INDEX IF NOT EXISTS sent_transaction_txn_id ON sent_transactions(transaction_id); --- So that we can do an efficient look up of all transactions that have yet to be successfully --- sent. -CREATE INDEX IF NOT EXISTS sent_transaction_sent ON sent_transactions(response_code); - - --- For sent transactions only. -CREATE TABLE IF NOT EXISTS transaction_id_to_pdu( - transaction_id INTEGER, - destination TEXT, - pdu_id TEXT, - pdu_origin TEXT -); - -CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_tx ON transaction_id_to_pdu(transaction_id, destination); -CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_dest ON transaction_id_to_pdu(destination); -CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_index ON transaction_id_to_pdu(transaction_id, destination); - --- To track destination health -CREATE TABLE IF NOT EXISTS destinations( - destination TEXT PRIMARY KEY, - retry_last_ts INTEGER, - retry_interval INTEGER -); diff --git a/synapse/storage/schema/users.sql b/synapse/storage/schema/users.sql deleted file mode 100644 index 08ccfdac0a..0000000000 --- a/synapse/storage/schema/users.sql +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -CREATE TABLE IF NOT EXISTS users( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT, - password_hash TEXT, - creation_ts INTEGER, - admin BOOL DEFAULT 0 NOT NULL, - UNIQUE(name) ON CONFLICT ROLLBACK -); - -CREATE TABLE IF NOT EXISTS access_tokens( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_id INTEGER NOT NULL, - device_id TEXT, - token TEXT NOT NULL, - last_used INTEGER, - FOREIGN KEY(user_id) REFERENCES users(id), - UNIQUE(token) ON CONFLICT ROLLBACK -); - -CREATE TABLE IF NOT EXISTS user_ips ( - user TEXT NOT NULL, - access_token TEXT NOT NULL, - device_id TEXT, - ip TEXT NOT NULL, - user_agent TEXT NOT NULL, - last_seen INTEGER NOT NULL, - CONSTRAINT user_ip UNIQUE (user, access_token, ip, user_agent) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS user_ips_user ON user_ips(user); - -- cgit 1.5.1 From 811355ccd0b1a22560ba0af57fd3ad1e07ad283f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Mar 2015 13:11:01 +0000 Subject: Add some docs and remove unused variables --- synapse/storage/__init__.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 07ccc4e2ee..565abe81d9 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -613,7 +613,7 @@ def _setup_new_database(cur): valid_dirs.append((ver, abs_path)) if not valid_dirs: - raise RuntimeError("Could not find a suitable current.sql") + raise PrepareDatabaseException("Could not find a suitable current.sql") max_current_ver, sql_dir = max(valid_dirs, key=lambda x: x[0]) @@ -684,7 +684,9 @@ def _upgrade_existing_database(cur, current_version, delta_files, upgraded): directory_entries = os.listdir(delta_dir) except OSError: logger.exception("Could not open delta dir for version %d", v) - raise + raise UpgradeDatabaseException( + "Could not open delta dir for version %d" % (v,) + ) directory_entries.sort() for file_name in directory_entries: @@ -697,18 +699,20 @@ def _upgrade_existing_database(cur, current_version, delta_files, upgraded): ) root_name, ext = os.path.splitext(file_name) if ext == ".py": + # This is a python upgrade module. We need to import into some + # package and then execute its `run_upgrade` function. module_name = "synapse.storage.v%d_%s" % ( v, root_name ) - with open(absolute_path) as schema_file: + with open(absolute_path) as python_file: module = imp.load_source( - module_name, absolute_path, schema_file + module_name, absolute_path, python_file ) logger.debug("Running script %s", relative_path) module.run_upgrade(cur) elif ext == ".sql": - with open(absolute_path) as schema_file: - delta_schema = schema_file.read() + # A plain old .sql file, just read and execute it + delta_schema = read_schema(absolute_path) logger.debug("Applying schema %s", relative_path) cur.executescript(delta_schema) else: @@ -751,7 +755,6 @@ def get_or_create_schema_state(txn): "SELECT file FROM schema_deltas WHERE version >= ?", (current_version,) ) - res = txn.fetchall() return current_version, txn.fetchall(), upgraded return None @@ -783,7 +786,6 @@ def prepare_sqlite3_database(db_conn): c.close() if row and row[0]: - ver = row[0] db_conn.execute( "INSERT INTO schema_version (version, upgraded)" " VALUES (?,?)", -- cgit 1.5.1 From c3530c3fb3f374bcb93097f23d311db979912d92 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Mar 2015 13:34:11 +0000 Subject: More docs. 
Rename 'schema/current' to 'schema/full_schemas' --- synapse/storage/__init__.py | 30 +++-- synapse/storage/schema/current/11/event_edges.sql | 89 --------------- .../storage/schema/current/11/event_signatures.sql | 65 ----------- synapse/storage/schema/current/11/im.sql | 125 --------------------- synapse/storage/schema/current/11/keys.sql | 31 ----- .../storage/schema/current/11/media_repository.sql | 68 ----------- synapse/storage/schema/current/11/presence.sql | 38 ------- synapse/storage/schema/current/11/profiles.sql | 20 ---- synapse/storage/schema/current/11/redactions.sql | 22 ---- synapse/storage/schema/current/11/room_aliases.sql | 27 ----- synapse/storage/schema/current/11/state.sql | 47 -------- synapse/storage/schema/current/11/transactions.sql | 68 ----------- synapse/storage/schema/current/11/users.sql | 45 -------- 13 files changed, 23 insertions(+), 652 deletions(-) delete mode 100644 synapse/storage/schema/current/11/event_edges.sql delete mode 100644 synapse/storage/schema/current/11/event_signatures.sql delete mode 100644 synapse/storage/schema/current/11/im.sql delete mode 100644 synapse/storage/schema/current/11/keys.sql delete mode 100644 synapse/storage/schema/current/11/media_repository.sql delete mode 100644 synapse/storage/schema/current/11/presence.sql delete mode 100644 synapse/storage/schema/current/11/profiles.sql delete mode 100644 synapse/storage/schema/current/11/redactions.sql delete mode 100644 synapse/storage/schema/current/11/room_aliases.sql delete mode 100644 synapse/storage/schema/current/11/state.sql delete mode 100644 synapse/storage/schema/current/11/transactions.sql delete mode 100644 synapse/storage/schema/current/11/users.sql (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 565abe81d9..a22a0e2f39 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -55,8 +55,8 @@ import re logger = logging.getLogger(__name__) -# Remember to update this number every time an incompatible change is made to -# database schema files, so the users will be informed on server restarts. +# Remember to update this number every time a change is made to database +# schema files, so the users will be informed on server restarts. SCHEMA_VERSION = 14 dir_path = os.path.abspath(os.path.dirname(__file__)) @@ -584,7 +584,7 @@ def prepare_database(db_conn): or upgrade from an older schema version. """ cur = db_conn.cursor() - version_info = get_or_create_schema_state(cur) + version_info = _get_or_create_schema_state(cur) if version_info: user_version, delta_files, upgraded = version_info @@ -599,7 +599,17 @@ def prepare_database(db_conn): def _setup_new_database(cur): - current_dir = os.path.join(dir_path, "schema", "current") + """Sets up the database by finding a base set of "full schemas" and then + applying any necessary deltas. + + The "full_schemas" directory has subdirectories named after versions. This + function searches for the highest version less than or equal to + `SCHEMA_VERSION` and executes all .sql files in that directory. + + The function will then apply all deltas for all versions after the base + version. 
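+
+ For example, if SCHEMA_VERSION is 14 and the highest version directory in
+ "full_schemas" is "11", this function will execute the full schema files in
+ "full_schemas/11" first and then apply the deltas for versions 12, 13 and
+ 14 in turn. (Illustrative example only: the exact directories present
+ depend on the schemas shipped with this version.)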
+ """ + current_dir = os.path.join(dir_path, "schema", "full_schemas") directory_entries = os.listdir(current_dir) valid_dirs = [] @@ -609,11 +619,15 @@ def _setup_new_database(cur): abs_path = os.path.join(current_dir, filename) if match and os.path.isdir(abs_path): ver = int(match.group(0)) - if ver < SCHEMA_VERSION: + if ver <= SCHEMA_VERSION: valid_dirs.append((ver, abs_path)) + else: + logger.warn("Unexpected entry in 'full_schemas': %s", filename) if not valid_dirs: - raise PrepareDatabaseException("Could not find a suitable current.sql") + raise PrepareDatabaseException( + "Could not find a suitable base set of full schemas" + ) max_current_ver, sql_dir = max(valid_dirs, key=lambda x: x[0]) @@ -655,6 +669,8 @@ def _upgrade_existing_database(cur, current_version, delta_files, upgraded): even if there has been no version bump. This is useful for development where orthogonal schema changes may happen on separate branches. + This is a no-op of current_version == SCHEMA_VERSION. + Args: cur (Cursor) current_version (int): The current version of the schema @@ -738,7 +754,7 @@ def _upgrade_existing_database(cur, current_version, delta_files, upgraded): ) -def get_or_create_schema_state(txn): +def _get_or_create_schema_state(txn): schema_path = os.path.join( dir_path, "schema", "schema_version.sql", ) diff --git a/synapse/storage/schema/current/11/event_edges.sql b/synapse/storage/schema/current/11/event_edges.sql deleted file mode 100644 index 1e766d6db2..0000000000 --- a/synapse/storage/schema/current/11/event_edges.sql +++ /dev/null @@ -1,89 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -CREATE TABLE IF NOT EXISTS event_forward_extremities( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS ev_extrem_room ON event_forward_extremities(room_id); -CREATE INDEX IF NOT EXISTS ev_extrem_id ON event_forward_extremities(event_id); - - -CREATE TABLE IF NOT EXISTS event_backward_extremities( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS ev_b_extrem_room ON event_backward_extremities(room_id); -CREATE INDEX IF NOT EXISTS ev_b_extrem_id ON event_backward_extremities(event_id); - - -CREATE TABLE IF NOT EXISTS event_edges( - event_id TEXT NOT NULL, - prev_event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - is_state INTEGER NOT NULL, - CONSTRAINT uniqueness UNIQUE (event_id, prev_event_id, room_id, is_state) -); - -CREATE INDEX IF NOT EXISTS ev_edges_id ON event_edges(event_id); -CREATE INDEX IF NOT EXISTS ev_edges_prev_id ON event_edges(prev_event_id); - - -CREATE TABLE IF NOT EXISTS room_depth( - room_id TEXT NOT NULL, - min_depth INTEGER NOT NULL, - CONSTRAINT uniqueness UNIQUE (room_id) -); - -CREATE INDEX IF NOT EXISTS room_depth_room ON room_depth(room_id); - - -create TABLE IF NOT EXISTS event_destinations( - event_id TEXT NOT NULL, - destination TEXT NOT NULL, - delivered_ts INTEGER DEFAULT 0, -- or 0 if not delivered - CONSTRAINT uniqueness UNIQUE (event_id, destination) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS event_destinations_id ON event_destinations(event_id); - - -CREATE TABLE IF NOT EXISTS state_forward_extremities( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS st_extrem_keys ON state_forward_extremities( - room_id, type, state_key -); -CREATE INDEX IF NOT EXISTS st_extrem_id ON state_forward_extremities(event_id); - - -CREATE TABLE IF NOT EXISTS event_auth( - event_id TEXT NOT NULL, - auth_id TEXT NOT NULL, - room_id TEXT NOT NULL, - CONSTRAINT uniqueness UNIQUE (event_id, auth_id, room_id) -); - -CREATE INDEX IF NOT EXISTS evauth_edges_id ON event_auth(event_id); -CREATE INDEX IF NOT EXISTS evauth_edges_auth_id ON event_auth(auth_id); \ No newline at end of file diff --git a/synapse/storage/schema/current/11/event_signatures.sql b/synapse/storage/schema/current/11/event_signatures.sql deleted file mode 100644 index c28c39c48a..0000000000 --- a/synapse/storage/schema/current/11/event_signatures.sql +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -CREATE TABLE IF NOT EXISTS event_content_hashes ( - event_id TEXT, - algorithm TEXT, - hash BLOB, - CONSTRAINT uniqueness UNIQUE (event_id, algorithm) -); - -CREATE INDEX IF NOT EXISTS event_content_hashes_id ON event_content_hashes( - event_id -); - - -CREATE TABLE IF NOT EXISTS event_reference_hashes ( - event_id TEXT, - algorithm TEXT, - hash BLOB, - CONSTRAINT uniqueness UNIQUE (event_id, algorithm) -); - -CREATE INDEX IF NOT EXISTS event_reference_hashes_id ON event_reference_hashes ( - event_id -); - - -CREATE TABLE IF NOT EXISTS event_signatures ( - event_id TEXT, - signature_name TEXT, - key_id TEXT, - signature BLOB, - CONSTRAINT uniqueness UNIQUE (event_id, signature_name, key_id) -); - -CREATE INDEX IF NOT EXISTS event_signatures_id ON event_signatures ( - event_id -); - - -CREATE TABLE IF NOT EXISTS event_edge_hashes( - event_id TEXT, - prev_event_id TEXT, - algorithm TEXT, - hash BLOB, - CONSTRAINT uniqueness UNIQUE ( - event_id, prev_event_id, algorithm - ) -); - -CREATE INDEX IF NOT EXISTS event_edge_hashes_id ON event_edge_hashes( - event_id -); diff --git a/synapse/storage/schema/current/11/im.sql b/synapse/storage/schema/current/11/im.sql deleted file mode 100644 index dd00c1cd2f..0000000000 --- a/synapse/storage/schema/current/11/im.sql +++ /dev/null @@ -1,125 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -CREATE TABLE IF NOT EXISTS events( - stream_ordering INTEGER PRIMARY KEY AUTOINCREMENT, - topological_ordering INTEGER NOT NULL, - event_id TEXT NOT NULL, - type TEXT NOT NULL, - room_id TEXT NOT NULL, - content TEXT NOT NULL, - unrecognized_keys TEXT, - processed BOOL NOT NULL, - outlier BOOL NOT NULL, - depth INTEGER DEFAULT 0 NOT NULL, - CONSTRAINT ev_uniq UNIQUE (event_id) -); - -CREATE INDEX IF NOT EXISTS events_event_id ON events (event_id); -CREATE INDEX IF NOT EXISTS events_stream_ordering ON events (stream_ordering); -CREATE INDEX IF NOT EXISTS events_topological_ordering ON events (topological_ordering); -CREATE INDEX IF NOT EXISTS events_room_id ON events (room_id); - - -CREATE TABLE IF NOT EXISTS event_json( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - internal_metadata NOT NULL, - json BLOB NOT NULL, - CONSTRAINT ev_j_uniq UNIQUE (event_id) -); - -CREATE INDEX IF NOT EXISTS event_json_id ON event_json(event_id); -CREATE INDEX IF NOT EXISTS event_json_room_id ON event_json(room_id); - - -CREATE TABLE IF NOT EXISTS state_events( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - prev_state TEXT -); - -CREATE UNIQUE INDEX IF NOT EXISTS state_events_event_id ON state_events (event_id); -CREATE INDEX IF NOT EXISTS state_events_room_id ON state_events (room_id); -CREATE INDEX IF NOT EXISTS state_events_type ON state_events (type); -CREATE INDEX IF NOT EXISTS state_events_state_key ON state_events (state_key); - - -CREATE TABLE IF NOT EXISTS current_state_events( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - CONSTRAINT curr_uniq UNIQUE (room_id, type, state_key) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS curr_events_event_id ON current_state_events (event_id); -CREATE INDEX IF NOT EXISTS current_state_events_room_id ON current_state_events (room_id); -CREATE INDEX IF NOT EXISTS current_state_events_type ON current_state_events (type); -CREATE INDEX IF NOT EXISTS current_state_events_state_key ON current_state_events (state_key); - -CREATE TABLE IF NOT EXISTS room_memberships( - event_id TEXT NOT NULL, - user_id TEXT NOT NULL, - sender TEXT NOT NULL, - room_id TEXT NOT NULL, - membership TEXT NOT NULL -); - -CREATE INDEX IF NOT EXISTS room_memberships_event_id ON room_memberships (event_id); -CREATE INDEX IF NOT EXISTS room_memberships_room_id ON room_memberships (room_id); -CREATE INDEX IF NOT EXISTS room_memberships_user_id ON room_memberships (user_id); - -CREATE TABLE IF NOT EXISTS feedback( - event_id TEXT NOT NULL, - feedback_type TEXT, - target_event_id TEXT, - sender TEXT, - room_id TEXT -); - -CREATE TABLE IF NOT EXISTS topics( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - topic TEXT NOT NULL -); - -CREATE INDEX IF NOT EXISTS topics_event_id ON topics(event_id); -CREATE INDEX IF NOT EXISTS topics_room_id ON topics(room_id); - -CREATE TABLE IF NOT EXISTS room_names( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - name TEXT NOT NULL -); - -CREATE INDEX IF NOT EXISTS room_names_event_id ON room_names(event_id); -CREATE INDEX IF NOT EXISTS room_names_room_id ON room_names(room_id); - -CREATE TABLE IF NOT EXISTS rooms( - room_id TEXT PRIMARY KEY NOT NULL, - is_public INTEGER, - creator TEXT -); - -CREATE TABLE IF NOT EXISTS room_hosts( - room_id TEXT NOT NULL, - host TEXT NOT NULL, - CONSTRAINT room_hosts_uniq UNIQUE (room_id, host) ON CONFLICT IGNORE -); - -CREATE INDEX IF NOT EXISTS room_hosts_room_id ON room_hosts (room_id); diff 
--git a/synapse/storage/schema/current/11/keys.sql b/synapse/storage/schema/current/11/keys.sql deleted file mode 100644 index a9e0a4fe0d..0000000000 --- a/synapse/storage/schema/current/11/keys.sql +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS server_tls_certificates( - server_name TEXT, -- Server name. - fingerprint TEXT, -- Certificate fingerprint. - from_server TEXT, -- Which key server the certificate was fetched from. - ts_added_ms INTEGER, -- When the certifcate was added. - tls_certificate BLOB, -- DER encoded x509 certificate. - CONSTRAINT uniqueness UNIQUE (server_name, fingerprint) -); - -CREATE TABLE IF NOT EXISTS server_signature_keys( - server_name TEXT, -- Server name. - key_id TEXT, -- Key version. - from_server TEXT, -- Which key server the key was fetched form. - ts_added_ms INTEGER, -- When the key was added. - verify_key BLOB, -- NACL verification key. - CONSTRAINT uniqueness UNIQUE (server_name, key_id) -); diff --git a/synapse/storage/schema/current/11/media_repository.sql b/synapse/storage/schema/current/11/media_repository.sql deleted file mode 100644 index afdf48cbfb..0000000000 --- a/synapse/storage/schema/current/11/media_repository.sql +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS local_media_repository ( - media_id TEXT, -- The id used to refer to the media. - media_type TEXT, -- The MIME-type of the media. - media_length INTEGER, -- Length of the media in bytes. - created_ts INTEGER, -- When the content was uploaded in ms. - upload_name TEXT, -- The name the media was uploaded with. - user_id TEXT, -- The user who uploaded the file. - CONSTRAINT uniqueness UNIQUE (media_id) -); - -CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails ( - media_id TEXT, -- The id used to refer to the media. - thumbnail_width INTEGER, -- The width of the thumbnail in pixels. - thumbnail_height INTEGER, -- The height of the thumbnail in pixels. - thumbnail_type TEXT, -- The MIME-type of the thumbnail. - thumbnail_method TEXT, -- The method used to make the thumbnail. - thumbnail_length INTEGER, -- The length of the thumbnail in bytes. 
- CONSTRAINT uniqueness UNIQUE ( - media_id, thumbnail_width, thumbnail_height, thumbnail_type - ) -); - -CREATE INDEX IF NOT EXISTS local_media_repository_thumbnails_media_id - ON local_media_repository_thumbnails (media_id); - -CREATE TABLE IF NOT EXISTS remote_media_cache ( - media_origin TEXT, -- The remote HS the media came from. - media_id TEXT, -- The id used to refer to the media on that server. - media_type TEXT, -- The MIME-type of the media. - created_ts INTEGER, -- When the content was uploaded in ms. - upload_name TEXT, -- The name the media was uploaded with. - media_length INTEGER, -- Length of the media in bytes. - filesystem_id TEXT, -- The name used to store the media on disk. - CONSTRAINT uniqueness UNIQUE (media_origin, media_id) -); - -CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails ( - media_origin TEXT, -- The remote HS the media came from. - media_id TEXT, -- The id used to refer to the media. - thumbnail_width INTEGER, -- The width of the thumbnail in pixels. - thumbnail_height INTEGER, -- The height of the thumbnail in pixels. - thumbnail_method TEXT, -- The method used to make the thumbnail - thumbnail_type TEXT, -- The MIME-type of the thumbnail. - thumbnail_length INTEGER, -- The length of the thumbnail in bytes. - filesystem_id TEXT, -- The name used to store the media on disk. - CONSTRAINT uniqueness UNIQUE ( - media_origin, media_id, thumbnail_width, thumbnail_height, - thumbnail_type, thumbnail_type - ) -); - -CREATE INDEX IF NOT EXISTS remote_media_cache_thumbnails_media_id - ON local_media_repository_thumbnails (media_id); diff --git a/synapse/storage/schema/current/11/presence.sql b/synapse/storage/schema/current/11/presence.sql deleted file mode 100644 index f9f8db9697..0000000000 --- a/synapse/storage/schema/current/11/presence.sql +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS presence( - user_id INTEGER NOT NULL, - state INTEGER, - status_msg TEXT, - mtime INTEGER, -- miliseconds since last state change - FOREIGN KEY(user_id) REFERENCES users(id) -); - --- For each of /my/ users which possibly-remote users are allowed to see their --- presence state -CREATE TABLE IF NOT EXISTS presence_allow_inbound( - observed_user_id INTEGER NOT NULL, - observer_user_id TEXT, -- a UserID, - FOREIGN KEY(observed_user_id) REFERENCES users(id) -); - --- For each of /my/ users (watcher), which possibly-remote users are they --- watching? 
-CREATE TABLE IF NOT EXISTS presence_list( - user_id INTEGER NOT NULL, - observed_user_id TEXT, -- a UserID, - accepted BOOLEAN, - FOREIGN KEY(user_id) REFERENCES users(id) -); diff --git a/synapse/storage/schema/current/11/profiles.sql b/synapse/storage/schema/current/11/profiles.sql deleted file mode 100644 index f06a528b4d..0000000000 --- a/synapse/storage/schema/current/11/profiles.sql +++ /dev/null @@ -1,20 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS profiles( - user_id INTEGER NOT NULL, - displayname TEXT, - avatar_url TEXT, - FOREIGN KEY(user_id) REFERENCES users(id) -); diff --git a/synapse/storage/schema/current/11/redactions.sql b/synapse/storage/schema/current/11/redactions.sql deleted file mode 100644 index 5011d95db8..0000000000 --- a/synapse/storage/schema/current/11/redactions.sql +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS redactions ( - event_id TEXT NOT NULL, - redacts TEXT NOT NULL, - CONSTRAINT ev_uniq UNIQUE (event_id) -); - -CREATE INDEX IF NOT EXISTS redactions_event_id ON redactions (event_id); -CREATE INDEX IF NOT EXISTS redactions_redacts ON redactions (redacts); diff --git a/synapse/storage/schema/current/11/room_aliases.sql b/synapse/storage/schema/current/11/room_aliases.sql deleted file mode 100644 index 0d2df01603..0000000000 --- a/synapse/storage/schema/current/11/room_aliases.sql +++ /dev/null @@ -1,27 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -CREATE TABLE IF NOT EXISTS room_aliases( - room_alias TEXT NOT NULL, - room_id TEXT NOT NULL -); - -CREATE TABLE IF NOT EXISTS room_alias_servers( - room_alias TEXT NOT NULL, - server TEXT NOT NULL -); - - - diff --git a/synapse/storage/schema/current/11/state.sql b/synapse/storage/schema/current/11/state.sql deleted file mode 100644 index 1fe8f1e430..0000000000 --- a/synapse/storage/schema/current/11/state.sql +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS state_groups( - id INTEGER PRIMARY KEY, - room_id TEXT NOT NULL, - event_id TEXT NOT NULL -); - -CREATE TABLE IF NOT EXISTS state_groups_state( - state_group INTEGER NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - event_id TEXT NOT NULL -); - -CREATE TABLE IF NOT EXISTS event_to_state_groups( - event_id TEXT NOT NULL, - state_group INTEGER NOT NULL, - CONSTRAINT event_to_state_groups_uniq UNIQUE (event_id) -); - -CREATE INDEX IF NOT EXISTS state_groups_id ON state_groups(id); - -CREATE INDEX IF NOT EXISTS state_groups_state_id ON state_groups_state( - state_group -); -CREATE INDEX IF NOT EXISTS state_groups_state_tuple ON state_groups_state( - room_id, type, state_key -); - -CREATE INDEX IF NOT EXISTS event_to_state_groups_id ON event_to_state_groups( - event_id -); \ No newline at end of file diff --git a/synapse/storage/schema/current/11/transactions.sql b/synapse/storage/schema/current/11/transactions.sql deleted file mode 100644 index 2d30f99b06..0000000000 --- a/synapse/storage/schema/current/11/transactions.sql +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ --- Stores what transaction ids we have received and what our response was -CREATE TABLE IF NOT EXISTS received_transactions( - transaction_id TEXT, - origin TEXT, - ts INTEGER, - response_code INTEGER, - response_json TEXT, - has_been_referenced BOOL default 0, -- Whether thishas been referenced by a prev_tx - CONSTRAINT uniquesss UNIQUE (transaction_id, origin) ON CONFLICT REPLACE -); - -CREATE UNIQUE INDEX IF NOT EXISTS transactions_txid ON received_transactions(transaction_id, origin); -CREATE INDEX IF NOT EXISTS transactions_have_ref ON received_transactions(origin, has_been_referenced);-- WHERE has_been_referenced = 0; - - --- Stores what transactions we've sent, what their response was (if we got one) and whether we have --- since referenced the transaction in another outgoing transaction -CREATE TABLE IF NOT EXISTS sent_transactions( - id INTEGER PRIMARY KEY AUTOINCREMENT, -- This is used to apply insertion ordering - transaction_id TEXT, - destination TEXT, - response_code INTEGER DEFAULT 0, - response_json TEXT, - ts INTEGER -); - -CREATE INDEX IF NOT EXISTS sent_transaction_dest ON sent_transactions(destination); -CREATE INDEX IF NOT EXISTS sent_transaction_dest_referenced ON sent_transactions( - destination -); -CREATE INDEX IF NOT EXISTS sent_transaction_txn_id ON sent_transactions(transaction_id); --- So that we can do an efficient look up of all transactions that have yet to be successfully --- sent. -CREATE INDEX IF NOT EXISTS sent_transaction_sent ON sent_transactions(response_code); - - --- For sent transactions only. -CREATE TABLE IF NOT EXISTS transaction_id_to_pdu( - transaction_id INTEGER, - destination TEXT, - pdu_id TEXT, - pdu_origin TEXT -); - -CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_tx ON transaction_id_to_pdu(transaction_id, destination); -CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_dest ON transaction_id_to_pdu(destination); -CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_index ON transaction_id_to_pdu(transaction_id, destination); - --- To track destination health -CREATE TABLE IF NOT EXISTS destinations( - destination TEXT PRIMARY KEY, - retry_last_ts INTEGER, - retry_interval INTEGER -); diff --git a/synapse/storage/schema/current/11/users.sql b/synapse/storage/schema/current/11/users.sql deleted file mode 100644 index 08ccfdac0a..0000000000 --- a/synapse/storage/schema/current/11/users.sql +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright 2014, 2015 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -CREATE TABLE IF NOT EXISTS users( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT, - password_hash TEXT, - creation_ts INTEGER, - admin BOOL DEFAULT 0 NOT NULL, - UNIQUE(name) ON CONFLICT ROLLBACK -); - -CREATE TABLE IF NOT EXISTS access_tokens( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_id INTEGER NOT NULL, - device_id TEXT, - token TEXT NOT NULL, - last_used INTEGER, - FOREIGN KEY(user_id) REFERENCES users(id), - UNIQUE(token) ON CONFLICT ROLLBACK -); - -CREATE TABLE IF NOT EXISTS user_ips ( - user TEXT NOT NULL, - access_token TEXT NOT NULL, - device_id TEXT, - ip TEXT NOT NULL, - user_agent TEXT NOT NULL, - last_seen INTEGER NOT NULL, - CONSTRAINT user_ip UNIQUE (user, access_token, ip, user_agent) ON CONFLICT REPLACE -); - -CREATE INDEX IF NOT EXISTS user_ips_user ON user_ips(user); - -- cgit 1.5.1 From 8c8354e85a702ef36de15db50d294e1a724ced7d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Mar 2015 13:34:38 +0000 Subject: Actually add full_schemas dir --- .../storage/schema/full_schemas/11/event_edges.sql | 89 +++++++++++++++ .../schema/full_schemas/11/event_signatures.sql | 65 +++++++++++ synapse/storage/schema/full_schemas/11/im.sql | 125 +++++++++++++++++++++ synapse/storage/schema/full_schemas/11/keys.sql | 31 +++++ .../schema/full_schemas/11/media_repository.sql | 68 +++++++++++ .../storage/schema/full_schemas/11/presence.sql | 38 +++++++ .../storage/schema/full_schemas/11/profiles.sql | 20 ++++ .../storage/schema/full_schemas/11/redactions.sql | 22 ++++ .../schema/full_schemas/11/room_aliases.sql | 27 +++++ synapse/storage/schema/full_schemas/11/state.sql | 47 ++++++++ .../schema/full_schemas/11/transactions.sql | 68 +++++++++++ synapse/storage/schema/full_schemas/11/users.sql | 45 ++++++++ 12 files changed, 645 insertions(+) create mode 100644 synapse/storage/schema/full_schemas/11/event_edges.sql create mode 100644 synapse/storage/schema/full_schemas/11/event_signatures.sql create mode 100644 synapse/storage/schema/full_schemas/11/im.sql create mode 100644 synapse/storage/schema/full_schemas/11/keys.sql create mode 100644 synapse/storage/schema/full_schemas/11/media_repository.sql create mode 100644 synapse/storage/schema/full_schemas/11/presence.sql create mode 100644 synapse/storage/schema/full_schemas/11/profiles.sql create mode 100644 synapse/storage/schema/full_schemas/11/redactions.sql create mode 100644 synapse/storage/schema/full_schemas/11/room_aliases.sql create mode 100644 synapse/storage/schema/full_schemas/11/state.sql create mode 100644 synapse/storage/schema/full_schemas/11/transactions.sql create mode 100644 synapse/storage/schema/full_schemas/11/users.sql (limited to 'synapse/storage') diff --git a/synapse/storage/schema/full_schemas/11/event_edges.sql b/synapse/storage/schema/full_schemas/11/event_edges.sql new file mode 100644 index 0000000000..1e766d6db2 --- /dev/null +++ b/synapse/storage/schema/full_schemas/11/event_edges.sql @@ -0,0 +1,89 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS event_forward_extremities( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE +); + +CREATE INDEX IF NOT EXISTS ev_extrem_room ON event_forward_extremities(room_id); +CREATE INDEX IF NOT EXISTS ev_extrem_id ON event_forward_extremities(event_id); + + +CREATE TABLE IF NOT EXISTS event_backward_extremities( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE +); + +CREATE INDEX IF NOT EXISTS ev_b_extrem_room ON event_backward_extremities(room_id); +CREATE INDEX IF NOT EXISTS ev_b_extrem_id ON event_backward_extremities(event_id); + + +CREATE TABLE IF NOT EXISTS event_edges( + event_id TEXT NOT NULL, + prev_event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + is_state INTEGER NOT NULL, + CONSTRAINT uniqueness UNIQUE (event_id, prev_event_id, room_id, is_state) +); + +CREATE INDEX IF NOT EXISTS ev_edges_id ON event_edges(event_id); +CREATE INDEX IF NOT EXISTS ev_edges_prev_id ON event_edges(prev_event_id); + + +CREATE TABLE IF NOT EXISTS room_depth( + room_id TEXT NOT NULL, + min_depth INTEGER NOT NULL, + CONSTRAINT uniqueness UNIQUE (room_id) +); + +CREATE INDEX IF NOT EXISTS room_depth_room ON room_depth(room_id); + + +create TABLE IF NOT EXISTS event_destinations( + event_id TEXT NOT NULL, + destination TEXT NOT NULL, + delivered_ts INTEGER DEFAULT 0, -- or 0 if not delivered + CONSTRAINT uniqueness UNIQUE (event_id, destination) ON CONFLICT REPLACE +); + +CREATE INDEX IF NOT EXISTS event_destinations_id ON event_destinations(event_id); + + +CREATE TABLE IF NOT EXISTS state_forward_extremities( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + type TEXT NOT NULL, + state_key TEXT NOT NULL, + CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE +); + +CREATE INDEX IF NOT EXISTS st_extrem_keys ON state_forward_extremities( + room_id, type, state_key +); +CREATE INDEX IF NOT EXISTS st_extrem_id ON state_forward_extremities(event_id); + + +CREATE TABLE IF NOT EXISTS event_auth( + event_id TEXT NOT NULL, + auth_id TEXT NOT NULL, + room_id TEXT NOT NULL, + CONSTRAINT uniqueness UNIQUE (event_id, auth_id, room_id) +); + +CREATE INDEX IF NOT EXISTS evauth_edges_id ON event_auth(event_id); +CREATE INDEX IF NOT EXISTS evauth_edges_auth_id ON event_auth(auth_id); \ No newline at end of file diff --git a/synapse/storage/schema/full_schemas/11/event_signatures.sql b/synapse/storage/schema/full_schemas/11/event_signatures.sql new file mode 100644 index 0000000000..c28c39c48a --- /dev/null +++ b/synapse/storage/schema/full_schemas/11/event_signatures.sql @@ -0,0 +1,65 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE IF NOT EXISTS event_content_hashes ( + event_id TEXT, + algorithm TEXT, + hash BLOB, + CONSTRAINT uniqueness UNIQUE (event_id, algorithm) +); + +CREATE INDEX IF NOT EXISTS event_content_hashes_id ON event_content_hashes( + event_id +); + + +CREATE TABLE IF NOT EXISTS event_reference_hashes ( + event_id TEXT, + algorithm TEXT, + hash BLOB, + CONSTRAINT uniqueness UNIQUE (event_id, algorithm) +); + +CREATE INDEX IF NOT EXISTS event_reference_hashes_id ON event_reference_hashes ( + event_id +); + + +CREATE TABLE IF NOT EXISTS event_signatures ( + event_id TEXT, + signature_name TEXT, + key_id TEXT, + signature BLOB, + CONSTRAINT uniqueness UNIQUE (event_id, signature_name, key_id) +); + +CREATE INDEX IF NOT EXISTS event_signatures_id ON event_signatures ( + event_id +); + + +CREATE TABLE IF NOT EXISTS event_edge_hashes( + event_id TEXT, + prev_event_id TEXT, + algorithm TEXT, + hash BLOB, + CONSTRAINT uniqueness UNIQUE ( + event_id, prev_event_id, algorithm + ) +); + +CREATE INDEX IF NOT EXISTS event_edge_hashes_id ON event_edge_hashes( + event_id +); diff --git a/synapse/storage/schema/full_schemas/11/im.sql b/synapse/storage/schema/full_schemas/11/im.sql new file mode 100644 index 0000000000..dd00c1cd2f --- /dev/null +++ b/synapse/storage/schema/full_schemas/11/im.sql @@ -0,0 +1,125 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE IF NOT EXISTS events( + stream_ordering INTEGER PRIMARY KEY AUTOINCREMENT, + topological_ordering INTEGER NOT NULL, + event_id TEXT NOT NULL, + type TEXT NOT NULL, + room_id TEXT NOT NULL, + content TEXT NOT NULL, + unrecognized_keys TEXT, + processed BOOL NOT NULL, + outlier BOOL NOT NULL, + depth INTEGER DEFAULT 0 NOT NULL, + CONSTRAINT ev_uniq UNIQUE (event_id) +); + +CREATE INDEX IF NOT EXISTS events_event_id ON events (event_id); +CREATE INDEX IF NOT EXISTS events_stream_ordering ON events (stream_ordering); +CREATE INDEX IF NOT EXISTS events_topological_ordering ON events (topological_ordering); +CREATE INDEX IF NOT EXISTS events_room_id ON events (room_id); + + +CREATE TABLE IF NOT EXISTS event_json( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + internal_metadata NOT NULL, + json BLOB NOT NULL, + CONSTRAINT ev_j_uniq UNIQUE (event_id) +); + +CREATE INDEX IF NOT EXISTS event_json_id ON event_json(event_id); +CREATE INDEX IF NOT EXISTS event_json_room_id ON event_json(room_id); + + +CREATE TABLE IF NOT EXISTS state_events( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + type TEXT NOT NULL, + state_key TEXT NOT NULL, + prev_state TEXT +); + +CREATE UNIQUE INDEX IF NOT EXISTS state_events_event_id ON state_events (event_id); +CREATE INDEX IF NOT EXISTS state_events_room_id ON state_events (room_id); +CREATE INDEX IF NOT EXISTS state_events_type ON state_events (type); +CREATE INDEX IF NOT EXISTS state_events_state_key ON state_events (state_key); + + +CREATE TABLE IF NOT EXISTS current_state_events( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + type TEXT NOT NULL, + state_key TEXT NOT NULL, + CONSTRAINT curr_uniq UNIQUE (room_id, type, state_key) ON CONFLICT REPLACE +); + +CREATE INDEX IF NOT EXISTS curr_events_event_id ON current_state_events (event_id); +CREATE INDEX IF NOT EXISTS current_state_events_room_id ON current_state_events (room_id); +CREATE INDEX IF NOT EXISTS current_state_events_type ON current_state_events (type); +CREATE INDEX IF NOT EXISTS current_state_events_state_key ON current_state_events (state_key); + +CREATE TABLE IF NOT EXISTS room_memberships( + event_id TEXT NOT NULL, + user_id TEXT NOT NULL, + sender TEXT NOT NULL, + room_id TEXT NOT NULL, + membership TEXT NOT NULL +); + +CREATE INDEX IF NOT EXISTS room_memberships_event_id ON room_memberships (event_id); +CREATE INDEX IF NOT EXISTS room_memberships_room_id ON room_memberships (room_id); +CREATE INDEX IF NOT EXISTS room_memberships_user_id ON room_memberships (user_id); + +CREATE TABLE IF NOT EXISTS feedback( + event_id TEXT NOT NULL, + feedback_type TEXT, + target_event_id TEXT, + sender TEXT, + room_id TEXT +); + +CREATE TABLE IF NOT EXISTS topics( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + topic TEXT NOT NULL +); + +CREATE INDEX IF NOT EXISTS topics_event_id ON topics(event_id); +CREATE INDEX IF NOT EXISTS topics_room_id ON topics(room_id); + +CREATE TABLE IF NOT EXISTS room_names( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + name TEXT NOT NULL +); + +CREATE INDEX IF NOT EXISTS room_names_event_id ON room_names(event_id); +CREATE INDEX IF NOT EXISTS room_names_room_id ON room_names(room_id); + +CREATE TABLE IF NOT EXISTS rooms( + room_id TEXT PRIMARY KEY NOT NULL, + is_public INTEGER, + creator TEXT +); + +CREATE TABLE IF NOT EXISTS room_hosts( + room_id TEXT NOT NULL, + host TEXT NOT NULL, + CONSTRAINT room_hosts_uniq UNIQUE (room_id, host) ON CONFLICT IGNORE +); + +CREATE INDEX IF NOT EXISTS room_hosts_room_id ON room_hosts (room_id); diff 
--git a/synapse/storage/schema/full_schemas/11/keys.sql b/synapse/storage/schema/full_schemas/11/keys.sql
new file mode 100644
index 0000000000..a9e0a4fe0d
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/keys.sql
@@ -0,0 +1,31 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS server_tls_certificates(
+    server_name TEXT, -- Server name.
+    fingerprint TEXT, -- Certificate fingerprint.
+    from_server TEXT, -- Which key server the certificate was fetched from.
+    ts_added_ms INTEGER, -- When the certificate was added.
+    tls_certificate BLOB, -- DER encoded x509 certificate.
+    CONSTRAINT uniqueness UNIQUE (server_name, fingerprint)
+);
+
+CREATE TABLE IF NOT EXISTS server_signature_keys(
+    server_name TEXT, -- Server name.
+    key_id TEXT, -- Key version.
+    from_server TEXT, -- Which key server the key was fetched from.
+    ts_added_ms INTEGER, -- When the key was added.
+    verify_key BLOB, -- NACL verification key.
+    CONSTRAINT uniqueness UNIQUE (server_name, key_id)
+);
diff --git a/synapse/storage/schema/full_schemas/11/media_repository.sql b/synapse/storage/schema/full_schemas/11/media_repository.sql
new file mode 100644
index 0000000000..afdf48cbfb
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/media_repository.sql
@@ -0,0 +1,68 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS local_media_repository (
+    media_id TEXT, -- The id used to refer to the media.
+    media_type TEXT, -- The MIME-type of the media.
+    media_length INTEGER, -- Length of the media in bytes.
+    created_ts INTEGER, -- When the content was uploaded in ms.
+    upload_name TEXT, -- The name the media was uploaded with.
+    user_id TEXT, -- The user who uploaded the file.
+    CONSTRAINT uniqueness UNIQUE (media_id)
+);
+
+CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails (
+    media_id TEXT, -- The id used to refer to the media.
+    thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
+    thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
+    thumbnail_type TEXT, -- The MIME-type of the thumbnail.
+    thumbnail_method TEXT, -- The method used to make the thumbnail.
+    thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
+    CONSTRAINT uniqueness UNIQUE (
+        media_id, thumbnail_width, thumbnail_height, thumbnail_type
+    )
+);
+
+CREATE INDEX IF NOT EXISTS local_media_repository_thumbnails_media_id
+    ON local_media_repository_thumbnails (media_id);
+
+CREATE TABLE IF NOT EXISTS remote_media_cache (
+    media_origin TEXT, -- The remote HS the media came from.
+    media_id TEXT, -- The id used to refer to the media on that server.
+    media_type TEXT, -- The MIME-type of the media.
+    created_ts INTEGER, -- When the content was uploaded in ms.
+    upload_name TEXT, -- The name the media was uploaded with.
+    media_length INTEGER, -- Length of the media in bytes.
+    filesystem_id TEXT, -- The name used to store the media on disk.
+    CONSTRAINT uniqueness UNIQUE (media_origin, media_id)
+);
+
+CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails (
+    media_origin TEXT, -- The remote HS the media came from.
+    media_id TEXT, -- The id used to refer to the media.
+    thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
+    thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
+    thumbnail_method TEXT, -- The method used to make the thumbnail.
+    thumbnail_type TEXT, -- The MIME-type of the thumbnail.
+    thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
+    filesystem_id TEXT, -- The name used to store the media on disk.
+    CONSTRAINT uniqueness UNIQUE (
+        media_origin, media_id, thumbnail_width, thumbnail_height,
+        thumbnail_type
+    )
+);
+
+CREATE INDEX IF NOT EXISTS remote_media_cache_thumbnails_media_id
+    ON remote_media_cache_thumbnails (media_id);
diff --git a/synapse/storage/schema/full_schemas/11/presence.sql b/synapse/storage/schema/full_schemas/11/presence.sql
new file mode 100644
index 0000000000..f9f8db9697
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/presence.sql
@@ -0,0 +1,38 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS presence(
+    user_id INTEGER NOT NULL,
+    state INTEGER,
+    status_msg TEXT,
+    mtime INTEGER, -- milliseconds since last state change
+    FOREIGN KEY(user_id) REFERENCES users(id)
+);
+
+-- For each of /my/ users which possibly-remote users are allowed to see their
+-- presence state
+CREATE TABLE IF NOT EXISTS presence_allow_inbound(
+    observed_user_id INTEGER NOT NULL,
+    observer_user_id TEXT, -- a UserID,
+    FOREIGN KEY(observed_user_id) REFERENCES users(id)
+);
+
+-- For each of /my/ users (watcher), which possibly-remote users are they
+-- watching?
+CREATE TABLE IF NOT EXISTS presence_list( + user_id INTEGER NOT NULL, + observed_user_id TEXT, -- a UserID, + accepted BOOLEAN, + FOREIGN KEY(user_id) REFERENCES users(id) +); diff --git a/synapse/storage/schema/full_schemas/11/profiles.sql b/synapse/storage/schema/full_schemas/11/profiles.sql new file mode 100644 index 0000000000..f06a528b4d --- /dev/null +++ b/synapse/storage/schema/full_schemas/11/profiles.sql @@ -0,0 +1,20 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +CREATE TABLE IF NOT EXISTS profiles( + user_id INTEGER NOT NULL, + displayname TEXT, + avatar_url TEXT, + FOREIGN KEY(user_id) REFERENCES users(id) +); diff --git a/synapse/storage/schema/full_schemas/11/redactions.sql b/synapse/storage/schema/full_schemas/11/redactions.sql new file mode 100644 index 0000000000..5011d95db8 --- /dev/null +++ b/synapse/storage/schema/full_schemas/11/redactions.sql @@ -0,0 +1,22 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +CREATE TABLE IF NOT EXISTS redactions ( + event_id TEXT NOT NULL, + redacts TEXT NOT NULL, + CONSTRAINT ev_uniq UNIQUE (event_id) +); + +CREATE INDEX IF NOT EXISTS redactions_event_id ON redactions (event_id); +CREATE INDEX IF NOT EXISTS redactions_redacts ON redactions (redacts); diff --git a/synapse/storage/schema/full_schemas/11/room_aliases.sql b/synapse/storage/schema/full_schemas/11/room_aliases.sql new file mode 100644 index 0000000000..0d2df01603 --- /dev/null +++ b/synapse/storage/schema/full_schemas/11/room_aliases.sql @@ -0,0 +1,27 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE IF NOT EXISTS room_aliases( + room_alias TEXT NOT NULL, + room_id TEXT NOT NULL +); + +CREATE TABLE IF NOT EXISTS room_alias_servers( + room_alias TEXT NOT NULL, + server TEXT NOT NULL +); + + + diff --git a/synapse/storage/schema/full_schemas/11/state.sql b/synapse/storage/schema/full_schemas/11/state.sql new file mode 100644 index 0000000000..1fe8f1e430 --- /dev/null +++ b/synapse/storage/schema/full_schemas/11/state.sql @@ -0,0 +1,47 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS state_groups( + id INTEGER PRIMARY KEY, + room_id TEXT NOT NULL, + event_id TEXT NOT NULL +); + +CREATE TABLE IF NOT EXISTS state_groups_state( + state_group INTEGER NOT NULL, + room_id TEXT NOT NULL, + type TEXT NOT NULL, + state_key TEXT NOT NULL, + event_id TEXT NOT NULL +); + +CREATE TABLE IF NOT EXISTS event_to_state_groups( + event_id TEXT NOT NULL, + state_group INTEGER NOT NULL, + CONSTRAINT event_to_state_groups_uniq UNIQUE (event_id) +); + +CREATE INDEX IF NOT EXISTS state_groups_id ON state_groups(id); + +CREATE INDEX IF NOT EXISTS state_groups_state_id ON state_groups_state( + state_group +); +CREATE INDEX IF NOT EXISTS state_groups_state_tuple ON state_groups_state( + room_id, type, state_key +); + +CREATE INDEX IF NOT EXISTS event_to_state_groups_id ON event_to_state_groups( + event_id +); \ No newline at end of file diff --git a/synapse/storage/schema/full_schemas/11/transactions.sql b/synapse/storage/schema/full_schemas/11/transactions.sql new file mode 100644 index 0000000000..2d30f99b06 --- /dev/null +++ b/synapse/storage/schema/full_schemas/11/transactions.sql @@ -0,0 +1,68 @@ +/* Copyright 2014, 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+-- Stores what transaction ids we have received and what our response was
+CREATE TABLE IF NOT EXISTS received_transactions(
+    transaction_id TEXT,
+    origin TEXT,
+    ts INTEGER,
+    response_code INTEGER,
+    response_json TEXT,
+    has_been_referenced BOOL DEFAULT 0, -- Whether this has been referenced by a prev_tx
+    CONSTRAINT uniqueness UNIQUE (transaction_id, origin) ON CONFLICT REPLACE
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS transactions_txid ON received_transactions(transaction_id, origin);
+CREATE INDEX IF NOT EXISTS transactions_have_ref ON received_transactions(origin, has_been_referenced); -- WHERE has_been_referenced = 0;
+
+
+-- Stores what transactions we've sent, what their response was (if we got one) and whether we have
+-- since referenced the transaction in another outgoing transaction
+CREATE TABLE IF NOT EXISTS sent_transactions(
+    id INTEGER PRIMARY KEY AUTOINCREMENT, -- This is used to apply insertion ordering
+    transaction_id TEXT,
+    destination TEXT,
+    response_code INTEGER DEFAULT 0,
+    response_json TEXT,
+    ts INTEGER
+);
+
+CREATE INDEX IF NOT EXISTS sent_transaction_dest ON sent_transactions(destination);
+CREATE INDEX IF NOT EXISTS sent_transaction_dest_referenced ON sent_transactions(
+    destination
+);
+CREATE INDEX IF NOT EXISTS sent_transaction_txn_id ON sent_transactions(transaction_id);
+-- So that we can do an efficient look up of all transactions that have yet to be successfully
+-- sent.
+CREATE INDEX IF NOT EXISTS sent_transaction_sent ON sent_transactions(response_code);
+
+
+-- For sent transactions only.
+CREATE TABLE IF NOT EXISTS transaction_id_to_pdu(
+    transaction_id INTEGER,
+    destination TEXT,
+    pdu_id TEXT,
+    pdu_origin TEXT
+);
+
+CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_tx ON transaction_id_to_pdu(transaction_id, destination);
+CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_dest ON transaction_id_to_pdu(destination);
+CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_index ON transaction_id_to_pdu(transaction_id, destination);
+
+-- To track destination health
+CREATE TABLE IF NOT EXISTS destinations(
+    destination TEXT PRIMARY KEY,
+    retry_last_ts INTEGER,
+    retry_interval INTEGER
+);
diff --git a/synapse/storage/schema/full_schemas/11/users.sql b/synapse/storage/schema/full_schemas/11/users.sql
new file mode 100644
index 0000000000..08ccfdac0a
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/users.sql
@@ -0,0 +1,45 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +CREATE TABLE IF NOT EXISTS users( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT, + password_hash TEXT, + creation_ts INTEGER, + admin BOOL DEFAULT 0 NOT NULL, + UNIQUE(name) ON CONFLICT ROLLBACK +); + +CREATE TABLE IF NOT EXISTS access_tokens( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + device_id TEXT, + token TEXT NOT NULL, + last_used INTEGER, + FOREIGN KEY(user_id) REFERENCES users(id), + UNIQUE(token) ON CONFLICT ROLLBACK +); + +CREATE TABLE IF NOT EXISTS user_ips ( + user TEXT NOT NULL, + access_token TEXT NOT NULL, + device_id TEXT, + ip TEXT NOT NULL, + user_agent TEXT NOT NULL, + last_seen INTEGER NOT NULL, + CONSTRAINT user_ip UNIQUE (user, access_token, ip, user_agent) ON CONFLICT REPLACE +); + +CREATE INDEX IF NOT EXISTS user_ips_user ON user_ips(user); + -- cgit 1.5.1 From 640e53935dc2585c7a6365980cd288e5ed176b38 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Mar 2015 13:43:17 +0000 Subject: Use context manager with db conn to correctly commit and rollback --- synapse/storage/__init__.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index a22a0e2f39..d637e8c4f7 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -55,7 +55,7 @@ import re logger = logging.getLogger(__name__) -# Remember to update this number every time an change is made to database +# Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. SCHEMA_VERSION = 14 @@ -583,19 +583,19 @@ def prepare_database(db_conn): """Prepares a database for usage. Will either create all necessary tables or upgrade from an older schema version. """ - cur = db_conn.cursor() - version_info = _get_or_create_schema_state(cur) + with db_conn: + cur = db_conn.cursor() + version_info = _get_or_create_schema_state(cur) - if version_info: - user_version, delta_files, upgraded = version_info - _upgrade_existing_database(cur, user_version, delta_files, upgraded) - else: - _setup_new_database(cur) + if version_info: + user_version, delta_files, upgraded = version_info + _upgrade_existing_database(cur, user_version, delta_files, upgraded) + else: + _setup_new_database(cur) - cur.execute("PRAGMA user_version = %d" % (SCHEMA_VERSION,)) - db_conn.commit() + cur.execute("PRAGMA user_version = %d" % (SCHEMA_VERSION,)) - cur.close() + cur.close() def _setup_new_database(cur): -- cgit 1.5.1 From b4c38738f438ee2b035a92d6e26b0f078956575f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Mar 2015 13:43:35 +0000 Subject: Change to use logger in db upgrade script --- synapse/storage/schema/delta/14/upgrade_appservice_db.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/schema/delta/14/upgrade_appservice_db.py b/synapse/storage/schema/delta/14/upgrade_appservice_db.py index 55e43c41ab..847b1c5b89 100644 --- a/synapse/storage/schema/delta/14/upgrade_appservice_db.py +++ b/synapse/storage/schema/delta/14/upgrade_appservice_db.py @@ -1,11 +1,14 @@ import json +import logging + +logger = logging.getLogger(__name__) def run_upgrade(cur): cur.execute("SELECT id, regex FROM application_services_regex") for row in cur.fetchall(): try: - print "checking %s..." % row[0] + logger.debug("Checking %s..." % row[0]) json.loads(row[1]) except ValueError: # row isn't in json, make it so. 
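
The upgrade_appservice_db.py change above illustrates the convention for Python-based schema deltas: the module exposes a run_upgrade(cur) function that receives a plain DB-API cursor and migrates data in place. As a minimal sketch of the same pattern (the legacy_settings table and its value column are hypothetical, invented purely for illustration; only the run_upgrade(cur) entry point and cursor usage mirror the delta above):

    import json
    import logging

    logger = logging.getLogger(__name__)


    def run_upgrade(cur):
        # Hypothetical delta: ensure every value in legacy_settings is valid
        # JSON, wrapping bare strings the same way the appservice delta above
        # wraps bare regexes.
        cur.execute("SELECT id, value FROM legacy_settings")
        for row_id, value in cur.fetchall():
            try:
                json.loads(value)
            except ValueError:
                logger.debug("Rewriting row %s as JSON", row_id)
                cur.execute(
                    "UPDATE legacy_settings SET value = ? WHERE id = ?",
                    (json.dumps(value), row_id),
                )

Because deltas for one version may run in any order, a module like this must be idempotent and independent of its siblings, which is exactly the orthogonality requirement spelled out in the docstrings added later in this series.
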
-- cgit 1.5.1 From 5eefd1f618a19cc27cfdaf915dc3abd3720cd0e7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Mar 2015 13:52:18 +0000 Subject: Add unique constraint on schema_version.lock schema. Use conflict clause in sql. --- synapse/storage/__init__.py | 6 +++--- synapse/storage/schema/schema_version.sql | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index d637e8c4f7..c09228c37f 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -645,7 +645,7 @@ def _setup_new_database(cur): cur.executescript(sql_script) cur.execute( - "INSERT INTO schema_version (version, upgraded)" + "INSERT OR REPLACE INTO schema_version (version, upgraded)" " VALUES (?,?)", (max_current_ver, False) ) @@ -748,7 +748,7 @@ def _upgrade_existing_database(cur, current_version, delta_files, upgraded): ) cur.execute( - "INSERT INTO schema_version (version, upgraded)" + "INSERT OR REPLACE INTO schema_version (version, upgraded)" " VALUES (?,?)", (v, True) ) @@ -803,7 +803,7 @@ def prepare_sqlite3_database(db_conn): if row and row[0]: db_conn.execute( - "INSERT INTO schema_version (version, upgraded)" + "INSERT OR REPLACE INTO schema_version (version, upgraded)" " VALUES (?,?)", (row[0], False) ) diff --git a/synapse/storage/schema/schema_version.sql b/synapse/storage/schema/schema_version.sql index 83a8c7b7ce..20b1481ba5 100644 --- a/synapse/storage/schema/schema_version.sql +++ b/synapse/storage/schema/schema_version.sql @@ -17,7 +17,8 @@ CREATE TABLE IF NOT EXISTS schema_version( Lock char(1) NOT NULL DEFAULT 'X', -- Makes sure this table only has one row. version INTEGER NOT NULL, upgraded BOOL NOT NULL, -- Whether we reached this version from an upgrade or an initial schema. - CONSTRAINT schema_version_lock CHECK (Lock='X') ON CONFLICT REPLACE + CONSTRAINT schema_version_lock_x CHECK (Lock='X') + CONSTRAINT schema_version_lock_uniq UNIQUE (Lock) ); CREATE TABLE IF NOT EXISTS schema_deltas( -- cgit 1.5.1 From 16dd87d848f686835cbff82333e00c6411b1436b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Mar 2015 14:03:21 +0000 Subject: Don't assume db conn is a Context Manager. Twisted adbapi wrapped connections aren't context managers. --- synapse/storage/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index c09228c37f..87189e54b8 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -583,7 +583,7 @@ def prepare_database(db_conn): """Prepares a database for usage. Will either create all necessary tables or upgrade from an older schema version. """ - with db_conn: + try: cur = db_conn.cursor() version_info = _get_or_create_schema_state(cur) @@ -596,6 +596,10 @@ def prepare_database(db_conn): cur.execute("PRAGMA user_version = %d" % (SCHEMA_VERSION,)) cur.close() + db_conn.commit() + except: + db_conn.rollback() + raise def _setup_new_database(cur): -- cgit 1.5.1 From 2a45f3d448439ebef047cc09ac62bccfa3ebac5a Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 4 Mar 2015 14:17:59 +0000 Subject: Use if not results rather than len, as per feedback. 
---
 synapse/storage/push_rule.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'synapse/storage')

diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index ea865b6abf..bbf322cc84 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -63,7 +63,7 @@ class PushRuleStore(SQLBaseStore):
             {'user_name': user_name, 'rule_id': rule_id},
             ['enabled']
         )
-        if len(results) == 0:
+        if not results:
             defer.returnValue(True)
         defer.returnValue(results[0])
-- 
cgit 1.5.1


From f701197227998e9fe270034be3c053cfa1f12ccf Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 4 Mar 2015 14:20:14 +0000
Subject: Add example directory structures in doc

---
 synapse/storage/__init__.py | 37 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 36 insertions(+), 1 deletion(-)

(limited to 'synapse/storage')

diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 87189e54b8..8b2f8f8963 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -608,10 +608,27 @@ def _setup_new_database(cur):
 
     The "full_schemas" directory has subdirectories named after versions. This
     function searches for the highest version less than or equal to
-    `SCHEMA_VERSION` and excutes all .sql files in that directory.
+    `SCHEMA_VERSION` and executes all .sql files in that directory.
 
     The function will then apply all deltas for all versions after the base
     version.
+
+    Example directory structure:
+
+        schema/
+            delta/
+                ...
+            full_schemas/
+                3/
+                    test.sql
+                    ...
+                11/
+                    foo.sql
+                    bar.sql
+                    ...
+
+    In the example foo.sql and bar.sql would be run, and then any delta files
+    for versions strictly greater than 11.
     """
     current_dir = os.path.join(dir_path, "schema", "full_schemas")
     directory_entries = os.listdir(current_dir)
@@ -675,6 +692,24 @@ def _upgrade_existing_database(cur, current_version, delta_files, upgraded):
 
     This is a no-op if current_version == SCHEMA_VERSION.
 
+    Example directory structure:
+
+        schema/
+            delta/
+                11/
+                    foo.sql
+                    ...
+                12/
+                    foo.sql
+                    bar.py
+                    ...
+            full_schemas/
+                ...
+
+    In the example, if current_version is 11, then foo.sql will be run if and
+    only if `upgraded` is True. Then `foo.sql` and `bar.py` would be run in
+    some arbitrary order.
+ Args: cur (Cursor) current_version (int): The current version of the schema -- cgit 1.5.1 From 5681264faa1457ac67a66ad2474ef644f0efeb55 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Mar 2015 14:21:53 +0000 Subject: s/%r/%s/ --- synapse/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 8b2f8f8963..98b877bd08 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -659,7 +659,7 @@ def _setup_new_database(cur): sql_script = "BEGIN TRANSACTION;\n" for filename in fnmatch.filter(directory_entries, "*.sql"): sql_loc = os.path.join(sql_dir, filename) - logger.debug("Applying schema %r", sql_loc) + logger.debug("Applying schema %s", sql_loc) sql_script += read_schema(sql_loc) sql_script += "\n" sql_script += "COMMIT TRANSACTION;" -- cgit 1.5.1 From 17d319a20df6e92ddbb8ca5b9e08615d2975b466 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Mar 2015 15:06:22 +0000 Subject: s/schema_deltas/applied_schema_deltas/ --- synapse/storage/__init__.py | 16 +++++++++------- synapse/storage/schema/schema_version.sql | 4 ++-- 2 files changed, 11 insertions(+), 9 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 98b877bd08..e2d5e5a41d 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -674,12 +674,13 @@ def _setup_new_database(cur): _upgrade_existing_database( cur, current_version=max_current_ver, - delta_files=[], + applied_delta_files=[], upgraded=False ) -def _upgrade_existing_database(cur, current_version, delta_files, upgraded): +def _upgrade_existing_database(cur, current_version, applied_delta_files, + upgraded): """Upgrades an existing database. Delta files can either be SQL stored in *.sql files, or python modules @@ -712,8 +713,9 @@ def _upgrade_existing_database(cur, current_version, delta_files, upgraded): Args: cur (Cursor) - current_version (int): The current version of the schema - delta_files (list): A list of deltas that have already been applied + current_version (int): The current version of the schema. + applied_delta_files (list): A list of deltas that have already been + applied. upgraded (bool): Whether the current version was generated by having applied deltas or from full schema file. If `True` the function will never apply delta files for the given `current_version`, since @@ -746,7 +748,7 @@ def _upgrade_existing_database(cur, current_version, delta_files, upgraded): directory_entries.sort() for file_name in directory_entries: relative_path = os.path.join(str(v), file_name) - if relative_path in delta_files: + if relative_path in applied_delta_files: continue absolute_path = os.path.join( @@ -781,7 +783,7 @@ def _upgrade_existing_database(cur, current_version, delta_files, upgraded): # Mark as done. 
cur.execute( - "INSERT INTO schema_deltas (version, file)" + "INSERT INTO applied_schema_deltas (version, file)" " VALUES (?,?)", (v, relative_path) ) @@ -807,7 +809,7 @@ def _get_or_create_schema_state(txn): if current_version: txn.execute( - "SELECT file FROM schema_deltas WHERE version >= ?", + "SELECT file FROM applied_schema_deltas WHERE version >= ?", (current_version,) ) return current_version, txn.fetchall(), upgraded diff --git a/synapse/storage/schema/schema_version.sql b/synapse/storage/schema/schema_version.sql index 20b1481ba5..0431e2d051 100644 --- a/synapse/storage/schema/schema_version.sql +++ b/synapse/storage/schema/schema_version.sql @@ -21,10 +21,10 @@ CREATE TABLE IF NOT EXISTS schema_version( CONSTRAINT schema_version_lock_uniq UNIQUE (Lock) ); -CREATE TABLE IF NOT EXISTS schema_deltas( +CREATE TABLE IF NOT EXISTS applied_schema_deltas( version INTEGER NOT NULL, file TEXT NOT NULL, CONSTRAINT schema_deltas_ver_file UNIQUE (version, file) ON CONFLICT IGNORE ); -CREATE INDEX IF NOT EXISTS schema_deltas_ver ON schema_deltas(version); +CREATE INDEX IF NOT EXISTS schema_deltas_ver ON applied_schema_deltas(version); -- cgit 1.5.1 From d56c01fff46d9adbccb8149d61f318f11040775e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Mar 2015 15:10:05 +0000 Subject: Note that we don't specify execution order --- synapse/storage/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index e2d5e5a41d..a3ff995695 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -691,6 +691,10 @@ def _upgrade_existing_database(cur, current_version, applied_delta_files, even if there has been no version bump. This is useful for development where orthogonal schema changes may happen on separate branches. + Different delta files for the same version *must* be orthogonal and give + the same result when applied in any order. No guarantees are made on the + order of execution of these scripts. + This is a no-op of current_version == SCHEMA_VERSION. 
Example directory structure: -- cgit 1.5.1 From cf66ddc1b4eb95fc08d8d3f9292c5c7aa899ccd0 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 6 Mar 2015 14:11:49 +0000 Subject: Schema change as delta in v14 --- synapse/storage/schema/delta/14/v14.sql | 9 +++++++++ synapse/storage/schema/delta/next_pushrules2.sql | 9 --------- 2 files changed, 9 insertions(+), 9 deletions(-) create mode 100644 synapse/storage/schema/delta/14/v14.sql delete mode 100644 synapse/storage/schema/delta/next_pushrules2.sql (limited to 'synapse/storage') diff --git a/synapse/storage/schema/delta/14/v14.sql b/synapse/storage/schema/delta/14/v14.sql new file mode 100644 index 0000000000..0212726448 --- /dev/null +++ b/synapse/storage/schema/delta/14/v14.sql @@ -0,0 +1,9 @@ +CREATE TABLE IF NOT EXISTS push_rules_enable ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_name TEXT NOT NULL, + rule_id TEXT NOT NULL, + enabled TINYINT, + UNIQUE(user_name, rule_id) +); + +CREATE INDEX IF NOT EXISTS push_rules_enable_user_name on push_rules_enable (user_name); diff --git a/synapse/storage/schema/delta/next_pushrules2.sql b/synapse/storage/schema/delta/next_pushrules2.sql deleted file mode 100644 index 0212726448..0000000000 --- a/synapse/storage/schema/delta/next_pushrules2.sql +++ /dev/null @@ -1,9 +0,0 @@ -CREATE TABLE IF NOT EXISTS push_rules_enable ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_name TEXT NOT NULL, - rule_id TEXT NOT NULL, - enabled TINYINT, - UNIQUE(user_name, rule_id) -); - -CREATE INDEX IF NOT EXISTS push_rules_enable_user_name on push_rules_enable (user_name); -- cgit 1.5.1