diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 0cc14fb692..7cb91a0be9 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -51,7 +51,7 @@ logger = logging.getLogger(__name__)
# Remember to update this number every time a change is made to database
 # schema files, so that users will be informed on server restarts.
-SCHEMA_VERSION = 17
+SCHEMA_VERSION = 18
dir_path = os.path.abspath(os.path.dirname(__file__))
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index b0020f51db..c9fe5a3555 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -308,6 +308,7 @@ class SQLBaseStore(object):
self._state_groups_id_gen = IdGenerator("state_groups", "id", self)
self._access_tokens_id_gen = IdGenerator("access_tokens", "id", self)
self._pushers_id_gen = IdGenerator("pushers", "id", self)
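+        # id generator for rows in the push_rules table; used by
+        # PushRuleStore (push_rule.py) when inserting new push rules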
+ self._push_rule_id_gen = IdGenerator("push_rules", "id", self)
def start_profiling(self):
self._previous_loop_ts = self._clock.time_msec()
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 38395c66ab..626a5eaf6e 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -23,6 +23,7 @@ from synapse.crypto.event_signing import compute_event_reference_hash
from syutil.base64util import decode_base64
from syutil.jsonutil import encode_canonical_json
+from contextlib import contextmanager
import logging
@@ -41,17 +42,25 @@ class EventsStore(SQLBaseStore):
self.min_token -= 1
stream_ordering = self.min_token
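+        # Pick a context manager that yields the stream ordering to persist
+        # the event under: either a newly allocated id from the stream id
+        # generator, or a trivial manager wrapping the ordering we already
+        # have (the backfill case above).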
+        if stream_ordering is None:
+            stream_ordering_manager = yield self._stream_id_gen.get_next(self)
+        else:
+            @contextmanager
+            def stream_ordering_manager():
+                yield stream_ordering
+            stream_ordering_manager = stream_ordering_manager()
+
try:
- yield self.runInteraction(
- "persist_event",
- self._persist_event_txn,
- event=event,
- context=context,
- backfilled=backfilled,
- stream_ordering=stream_ordering,
- is_new_state=is_new_state,
- current_state=current_state,
- )
+ with stream_ordering_manager as stream_ordering:
+ yield self.runInteraction(
+ "persist_event",
+ self._persist_event_txn,
+ event=event,
+ context=context,
+ backfilled=backfilled,
+ stream_ordering=stream_ordering,
+ is_new_state=is_new_state,
+ current_state=current_state,
+ )
except _RollbackButIsFineException:
pass
@@ -95,15 +104,6 @@ class EventsStore(SQLBaseStore):
 # Remove any existing cache entries for the event_id
txn.call_after(self._invalidate_get_event_cache, event.event_id)
- if stream_ordering is None:
- with self._stream_id_gen.get_next_txn(txn) as stream_ordering:
- return self._persist_event_txn(
- txn, event, context, backfilled,
- stream_ordering=stream_ordering,
- is_new_state=is_new_state,
- current_state=current_state,
- )
-
# We purposefully do this first since if we include a `current_state`
# key, we *want* to update the `current_state_events` table
if current_state:
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index ee7718d5ed..34805e276e 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -19,7 +19,6 @@ from ._base import SQLBaseStore, Table
from twisted.internet import defer
import logging
-import copy
import simplejson as json
logger = logging.getLogger(__name__)
@@ -28,46 +27,45 @@ logger = logging.getLogger(__name__)
class PushRuleStore(SQLBaseStore):
@defer.inlineCallbacks
def get_push_rules_for_user(self, user_name):
- sql = (
- "SELECT "+",".join(PushRuleTable.fields)+" "
- "FROM "+PushRuleTable.table_name+" "
- "WHERE user_name = ? "
- "ORDER BY priority_class DESC, priority DESC"
+ rows = yield self._simple_select_list(
+ table=PushRuleTable.table_name,
+ keyvalues={
+ "user_name": user_name,
+ },
+ retcols=PushRuleTable.fields,
)
- rows = yield self._execute("get_push_rules_for_user", None, sql, user_name)
- dicts = []
- for r in rows:
- d = {}
- for i, f in enumerate(PushRuleTable.fields):
- d[f] = r[i]
- dicts.append(d)
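+        # sort client-side: highest priority_class first, then highest priority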
+ rows.sort(
+ key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))
+ )
- defer.returnValue(dicts)
+ defer.returnValue(rows)
@defer.inlineCallbacks
def get_push_rules_enabled_for_user(self, user_name):
results = yield self._simple_select_list(
- PushRuleEnableTable.table_name,
- {'user_name': user_name},
- PushRuleEnableTable.fields,
+ table=PushRuleEnableTable.table_name,
+ keyvalues={
+ 'user_name': user_name
+ },
+ retcols=PushRuleEnableTable.fields,
desc="get_push_rules_enabled_for_user",
)
- defer.returnValue(
- {r['rule_id']: False if r['enabled'] == 0 else True for r in results}
- )
+ defer.returnValue({
+            r['rule_id']: r['enabled'] != 0 for r in results
+ })
@defer.inlineCallbacks
def add_push_rule(self, before, after, **kwargs):
- vals = copy.copy(kwargs)
+ vals = kwargs
if 'conditions' in vals:
vals['conditions'] = json.dumps(vals['conditions'])
if 'actions' in vals:
vals['actions'] = json.dumps(vals['actions'])
+
 # We could check that the remaining keys are valid column names, but
 # SQLite will do that anyway, so the check would be redundant.
- if 'id' in vals:
- del vals['id']
+ vals.pop("id", None)
if before or after:
ret = yield self.runInteraction(
@@ -87,39 +85,39 @@ class PushRuleStore(SQLBaseStore):
defer.returnValue(ret)
def _add_push_rule_relative_txn(self, txn, user_name, **kwargs):
- after = None
- relative_to_rule = None
- if 'after' in kwargs and kwargs['after']:
- after = kwargs['after']
- relative_to_rule = after
- if 'before' in kwargs and kwargs['before']:
- relative_to_rule = kwargs['before']
-
- # get the priority of the rule we're inserting after/before
- sql = (
- "SELECT priority_class, priority FROM ? "
- "WHERE user_name = ? and rule_id = ?" % (PushRuleTable.table_name,)
+ after = kwargs.pop("after", None)
+ relative_to_rule = kwargs.pop("before", after)
+
+ res = self._simple_select_one_txn(
+ txn,
+ table=PushRuleTable.table_name,
+ keyvalues={
+ "user_name": user_name,
+ "rule_id": relative_to_rule,
+ },
+ retcols=["priority_class", "priority"],
+ allow_none=True,
)
- txn.execute(sql, (user_name, relative_to_rule))
- res = txn.fetchall()
+
if not res:
raise RuleNotFoundException(
"before/after rule not found: %s" % (relative_to_rule,)
)
- priority_class, base_rule_priority = res[0]
+
+ priority_class = res["priority_class"]
+ base_rule_priority = res["priority"]
if 'priority_class' in kwargs and kwargs['priority_class'] != priority_class:
raise InconsistentRuleException(
"Given priority class does not match class of relative rule"
)
- new_rule = copy.copy(kwargs)
- if 'before' in new_rule:
- del new_rule['before']
- if 'after' in new_rule:
- del new_rule['after']
+        # "before" and "after" were already popped from kwargs above
+        new_rule = kwargs
new_rule['priority_class'] = priority_class
new_rule['user_name'] = user_name
+ new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn)
# check if the priority before/after is free
new_rule_priority = base_rule_priority
@@ -153,12 +151,11 @@ class PushRuleStore(SQLBaseStore):
txn.execute(sql, (user_name, priority_class, new_rule_priority))
- # now insert the new rule
- sql = "INSERT INTO "+PushRuleTable.table_name+" ("
- sql += ",".join(new_rule.keys())+") VALUES ("
- sql += ", ".join(["?" for _ in new_rule.keys()])+")"
-
- txn.execute(sql, new_rule.values())
+ self._simple_insert_txn(
+ txn,
+ table=PushRuleTable.table_name,
+ values=new_rule,
+ )
def _add_push_rule_highest_priority_txn(self, txn, user_name,
priority_class, **kwargs):
@@ -176,18 +173,17 @@ class PushRuleStore(SQLBaseStore):
new_prio = highest_prio + 1
# and insert the new rule
- new_rule = copy.copy(kwargs)
- if 'id' in new_rule:
- del new_rule['id']
+ new_rule = kwargs
+ new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn)
new_rule['user_name'] = user_name
new_rule['priority_class'] = priority_class
new_rule['priority'] = new_prio
- sql = "INSERT INTO "+PushRuleTable.table_name+" ("
- sql += ",".join(new_rule.keys())+") VALUES ("
- sql += ", ".join(["?" for _ in new_rule.keys()])+")"
-
- txn.execute(sql, new_rule.values())
+ self._simple_insert_txn(
+ txn,
+ table=PushRuleTable.table_name,
+ values=new_rule,
+ )
@defer.inlineCallbacks
def delete_push_rule(self, user_name, rule_id):
@@ -211,7 +207,7 @@ class PushRuleStore(SQLBaseStore):
yield self._simple_upsert(
PushRuleEnableTable.table_name,
{'user_name': user_name, 'rule_id': rule_id},
- {'enabled': enabled},
+ {'enabled': 1 if enabled else 0},
desc="set_push_rule_enabled",
)
diff --git a/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql
new file mode 100644
index 0000000000..c0b0fdfb69
--- /dev/null
+++ b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql
@@ -0,0 +1,32 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
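+-- The previous server_keys_json table stored these timestamps in columns
+-- that are presumably too narrow (plain INTEGER), so rebuild it with BIGINT
+-- columns: create a new table, copy the rows across, drop the old table and
+-- rename the new one into place.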
+CREATE TABLE IF NOT EXISTS new_server_keys_json (
+ server_name TEXT NOT NULL, -- Server name.
+ key_id TEXT NOT NULL, -- Requested key id.
+ from_server TEXT NOT NULL, -- Which server the keys were fetched from.
+ ts_added_ms BIGINT NOT NULL, -- When the keys were fetched
+    ts_valid_until_ms BIGINT NOT NULL, -- When this version of the keys expires.
+    key_json BYTEA NOT NULL, -- JSON certificate for the remote server.
+ CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server)
+);
+
+INSERT INTO new_server_keys_json
+    SELECT server_name, key_id, from_server, ts_added_ms, ts_valid_until_ms, key_json FROM server_keys_json;
+
+DROP TABLE server_keys_json;
+
+ALTER TABLE new_server_keys_json RENAME TO server_keys_json;
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index e40eb8a8c4..89d1643f10 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -78,14 +78,18 @@ class StreamIdGenerator(object):
self._current_max = None
self._unfinished_ids = deque()
- def get_next_txn(self, txn):
+ @defer.inlineCallbacks
+ def get_next(self, store):
"""
Usage:
- with stream_id_gen.get_next_txn(txn) as stream_id:
+            with (yield stream_id_gen.get_next(store)) as stream_id:
# ... persist event ...
"""
if not self._current_max:
- self._get_or_compute_current_max(txn)
+ yield store.runInteraction(
+ "_compute_current_max",
+ self._get_or_compute_current_max,
+ )
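+        # the current maximum is loaded lazily from the database on first
+        # use; after that, new ids are allocated in memory under the lock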
with self._lock:
self._current_max += 1
@@ -101,7 +105,7 @@ class StreamIdGenerator(object):
with self._lock:
self._unfinished_ids.remove(next_id)
- return manager()
+ defer.returnValue(manager())
@defer.inlineCallbacks
def get_max_token(self, store):
|