diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 1639e2c973..6b8fed4502 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -37,14 +37,17 @@ from .registration import RegistrationStore
from .room import RoomStore
from .roommember import RoomMemberStore
from .stream import StreamStore
-from .pdu import StatePduStore, PduStore, PdusTable
from .transactions import TransactionStore
from .keys import KeyStore
+from .event_federation import EventFederationStore
+
+from .state import StateStore
from .signatures import SignatureStore
from syutil.base64util import decode_base64
-from synapse.crypto.event_signing import compute_pdu_event_reference_hash
+from synapse.crypto.event_signing import compute_event_reference_hash
+
import json
import logging
@@ -56,7 +59,6 @@ logger = logging.getLogger(__name__)
SCHEMAS = [
"transactions",
- "pdu",
"users",
"profiles",
"presence",
@@ -64,7 +66,9 @@ SCHEMAS = [
"room_aliases",
"keys",
"redactions",
- "signatures",
+ "state",
+ "event_edges",
+ "event_signatures",
]
@@ -79,10 +83,12 @@ class _RollbackButIsFineException(Exception):
"""
pass
+
class DataStore(RoomMemberStore, RoomStore,
RegistrationStore, StreamStore, ProfileStore, FeedbackStore,
- PresenceStore, PduStore, StatePduStore, TransactionStore,
- DirectoryStore, KeyStore, SignatureStore):
+ PresenceStore, TransactionStore,
+ DirectoryStore, KeyStore, StateStore, SignatureStore,
+ EventFederationStore, ):
def __init__(self, hs):
super(DataStore, self).__init__(hs)
@@ -105,6 +111,7 @@ class DataStore(RoomMemberStore, RoomStore,
try:
yield self.runInteraction(
+ "persist_event",
self._persist_pdu_event_txn,
pdu=pdu,
event=event,
@@ -125,7 +132,8 @@ class DataStore(RoomMemberStore, RoomStore,
"type",
"room_id",
"content",
- "unrecognized_keys"
+ "unrecognized_keys",
+ "depth",
],
allow_none=allow_none,
)
@@ -139,68 +147,12 @@ class DataStore(RoomMemberStore, RoomStore,
def _persist_pdu_event_txn(self, txn, pdu=None, event=None,
backfilled=False, stream_ordering=None,
is_new_state=True):
- if pdu is not None:
- self._persist_event_pdu_txn(txn, pdu)
if event is not None:
return self._persist_event_txn(
txn, event, backfilled, stream_ordering,
is_new_state=is_new_state,
)
- def _persist_event_pdu_txn(self, txn, pdu):
- cols = dict(pdu.__dict__)
- unrec_keys = dict(pdu.unrecognized_keys)
- del cols["hashes"]
- del cols["signatures"]
- del cols["content"]
- del cols["prev_pdus"]
- cols["content_json"] = json.dumps(pdu.content)
-
- unrec_keys.update({
- k: v for k, v in cols.items()
- if k not in PdusTable.fields
- })
-
- cols["unrecognized_keys"] = json.dumps(unrec_keys)
-
- cols["ts"] = cols.pop("origin_server_ts")
-
- logger.debug("Persisting: %s", repr(cols))
-
- for hash_alg, hash_base64 in pdu.hashes.items():
- hash_bytes = decode_base64(hash_base64)
- self._store_pdu_content_hash_txn(
- txn, pdu.pdu_id, pdu.origin, hash_alg, hash_bytes,
- )
-
- signatures = pdu.signatures.get(pdu.origin, {})
-
- for key_id, signature_base64 in signatures.items():
- signature_bytes = decode_base64(signature_base64)
- self._store_pdu_origin_signature_txn(
- txn, pdu.pdu_id, pdu.origin, key_id, signature_bytes,
- )
-
- for prev_pdu_id, prev_origin, prev_hashes in pdu.prev_pdus:
- for alg, hash_base64 in prev_hashes.items():
- hash_bytes = decode_base64(hash_base64)
- self._store_prev_pdu_hash_txn(
- txn, pdu.pdu_id, pdu.origin, prev_pdu_id, prev_origin, alg,
- hash_bytes
- )
-
- (ref_alg, ref_hash_bytes) = compute_pdu_event_reference_hash(pdu)
- self._store_pdu_reference_hash_txn(
- txn, pdu.pdu_id, pdu.origin, ref_alg, ref_hash_bytes
- )
-
- if pdu.is_state:
- self._persist_state_txn(txn, pdu.prev_pdus, cols)
- else:
- self._persist_pdu_txn(txn, pdu.prev_pdus, cols)
-
- self._update_min_depth_for_context_txn(txn, pdu.context, pdu.depth)
-
@log_function
def _persist_event_txn(self, txn, event, backfilled, stream_ordering=None,
is_new_state=True):
@@ -225,6 +177,10 @@ class DataStore(RoomMemberStore, RoomStore,
elif event.type == RoomRedactionEvent.TYPE:
self._store_redaction(txn, event)
+ outlier = False
+ if hasattr(event, "outlier"):
+ outlier = event.outlier
+
vals = {
"topological_ordering": event.depth,
"event_id": event.event_id,
@@ -232,25 +188,33 @@ class DataStore(RoomMemberStore, RoomStore,
"room_id": event.room_id,
"content": json.dumps(event.content),
"processed": True,
+ "outlier": outlier,
+ "depth": event.depth,
}
if stream_ordering is not None:
vals["stream_ordering"] = stream_ordering
- if hasattr(event, "outlier"):
- vals["outlier"] = event.outlier
- else:
- vals["outlier"] = False
-
unrec = {
k: v
for k, v in event.get_full_dict().items()
- if k not in vals.keys() and k not in ["redacted", "redacted_because"]
+ if k not in vals.keys() and k not in [
+ "redacted",
+ "redacted_because",
+ "signatures",
+ "hashes",
+ "prev_events",
+ ]
}
vals["unrecognized_keys"] = json.dumps(unrec)
try:
- self._simple_insert_txn(txn, "events", vals)
+ self._simple_insert_txn(
+ txn,
+ "events",
+ vals,
+ or_replace=(not outlier),
+ )
except:
logger.warn(
"Failed to persist, probably duplicate: %s",
@@ -259,6 +223,16 @@ class DataStore(RoomMemberStore, RoomStore,
)
raise _RollbackButIsFineException("_persist_event")
+ self._handle_prev_events(
+ txn,
+ outlier=outlier,
+ event_id=event.event_id,
+ prev_events=event.prev_events,
+ room_id=event.room_id,
+ )
+
+ self._store_state_groups_txn(txn, event)
+
is_state = hasattr(event, "state_key") and event.state_key is not None
if is_new_state and is_state:
vals = {
@@ -284,6 +258,35 @@ class DataStore(RoomMemberStore, RoomStore,
}
)
+ for hash_alg, hash_base64 in event.hashes.items():
+ hash_bytes = decode_base64(hash_base64)
+ self._store_event_content_hash_txn(
+ txn, event.event_id, hash_alg, hash_bytes,
+ )
+
+ if hasattr(event, "signatures"):
+ signatures = event.signatures.get(event.origin, {})
+
+ for key_id, signature_base64 in signatures.items():
+ signature_bytes = decode_base64(signature_base64)
+ self._store_event_origin_signature_txn(
+ txn, event.event_id, event.origin, key_id, signature_bytes,
+ )
+
+ for prev_event_id, prev_hashes in event.prev_events:
+ for alg, hash_base64 in prev_hashes.items():
+ hash_bytes = decode_base64(hash_base64)
+ self._store_prev_event_hash_txn(
+ txn, event.event_id, prev_event_id, alg, hash_bytes
+ )
+
+ (ref_alg, ref_hash_bytes) = compute_event_reference_hash(event)
+ self._store_event_reference_hash_txn(
+ txn, event.event_id, ref_alg, ref_hash_bytes
+ )
+
+ self._update_min_depth_for_room_txn(txn, event.room_id, event.depth)
+
def _store_redaction(self, txn, event):
txn.execute(
"INSERT OR IGNORE INTO redactions "
@@ -366,29 +369,19 @@ class DataStore(RoomMemberStore, RoomStore,
"""
def _snapshot(txn):
membership_state = self._get_room_member(txn, user_id, room_id)
- prev_pdus = self._get_latest_pdus_in_context(
- txn, room_id
- )
-
- if state_type is not None and state_key is not None:
- prev_state_pdu = self._get_current_state_pdu(
- txn, room_id, state_type, state_key
- )
- else:
- prev_state_pdu = None
+ prev_events = self._get_latest_events_in_room(txn, room_id)
return Snapshot(
store=self,
room_id=room_id,
user_id=user_id,
- prev_pdus=prev_pdus,
+ prev_events=prev_events,
membership_state=membership_state,
state_type=state_type,
state_key=state_key,
- prev_state_pdu=prev_state_pdu,
)
- return self.runInteraction(_snapshot)
+ return self.runInteraction("snapshot_room", _snapshot)
class Snapshot(object):
@@ -397,7 +390,7 @@ class Snapshot(object):
store (DataStore): The datastore.
room_id (RoomId): The room of the snapshot.
user_id (UserId): The user this snapshot is for.
- prev_pdus (list): The list of PDU ids this snapshot is after.
+ prev_events (list): The list of event ids this snapshot is after.
membership_state (RoomMemberEvent): The current state of the user in
the room.
state_type (str, optional): State type captured by the snapshot
@@ -406,29 +399,29 @@ class Snapshot(object):
the previous value of the state type and key in the room.
"""
- def __init__(self, store, room_id, user_id, prev_pdus,
+ def __init__(self, store, room_id, user_id, prev_events,
membership_state, state_type=None, state_key=None,
prev_state_pdu=None):
self.store = store
self.room_id = room_id
self.user_id = user_id
- self.prev_pdus = prev_pdus
+ self.prev_events = prev_events
self.membership_state = membership_state
self.state_type = state_type
self.state_key = state_key
self.prev_state_pdu = prev_state_pdu
def fill_out_prev_events(self, event):
- if hasattr(event, "prev_pdus"):
+ if hasattr(event, "prev_events"):
return
- event.prev_pdus = [
- (p_id, origin, hashes)
- for p_id, origin, hashes, _ in self.prev_pdus
+ event.prev_events = [
+ (event_id, hashes)
+ for event_id, hashes, _ in self.prev_events
]
- if self.prev_pdus:
- event.depth = max([int(v) for _, _, _, v in self.prev_pdus]) + 1
+ if self.prev_events:
+ event.depth = max([int(v) for _, _, v in self.prev_events]) + 1
else:
event.depth = 0
@@ -487,9 +480,10 @@ def prepare_database(db_conn):
db_conn.commit()
else:
- sql_script = "BEGIN TRANSACTION;"
+ sql_script = "BEGIN TRANSACTION;\n"
for sql_loc in SCHEMAS:
sql_script += read_schema(sql_loc)
+ sql_script += "\n"
sql_script += "COMMIT TRANSACTION;"
c.executescript(sql_script)
db_conn.commit()
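The depth bookkeeping in `Snapshot.fill_out_prev_events` above is the heart of this change: a new event sits one level deeper than its deepest predecessor, or at depth 0 when the room has no forward extremities yet. A minimal sketch of that rule, assuming `prev_events` entries are the `(event_id, hashes, depth)` tuples produced by `_get_latest_events_in_room`:

```python
# Illustrative sketch of the depth rule, not code from this patch.
def next_depth(prev_events):
    # prev_events: list of (event_id, hashes, depth) tuples
    if not prev_events:
        return 0
    return max(int(depth) for _, _, depth in prev_events) + 1

assert next_depth([]) == 0
assert next_depth([("$a:hs", {}, 3), ("$b:hs", {}, 5)]) == 6
```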
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 65a86e9056..464b12f032 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -19,54 +19,66 @@ from twisted.internet import defer
from synapse.api.errors import StoreError
from synapse.api.events.utils import prune_event
from synapse.util.logutils import log_function
+from syutil.base64util import encode_base64
import collections
import copy
import json
+import sys
+import time
logger = logging.getLogger(__name__)
sql_logger = logging.getLogger("synapse.storage.SQL")
+transaction_logger = logging.getLogger("synapse.storage.txn")
class LoggingTransaction(object):
"""An object that almost-transparently proxies for the 'txn' object
passed to the constructor. Adds logging to the .execute() method."""
- __slots__ = ["txn"]
+ __slots__ = ["txn", "name"]
- def __init__(self, txn):
+ def __init__(self, txn, name):
object.__setattr__(self, "txn", txn)
+ object.__setattr__(self, "name", name)
- def __getattribute__(self, name):
- if name == "execute":
- return object.__getattribute__(self, "execute")
-
- return getattr(object.__getattribute__(self, "txn"), name)
+ def __getattr__(self, name):
+ return getattr(self.txn, name)
def __setattr__(self, name, value):
- setattr(object.__getattribute__(self, "txn"), name, value)
+ setattr(self.txn, name, value)
def execute(self, sql, *args, **kwargs):
# TODO(paul): Maybe use 'info' and 'debug' for values?
- sql_logger.debug("[SQL] %s", sql)
+ sql_logger.debug("[SQL] {%s} %s", self.name, sql)
try:
if args and args[0]:
values = args[0]
- sql_logger.debug("[SQL values] " +
- ", ".join(("<%s>",) * len(values)), *values)
+ sql_logger.debug(
+ "[SQL values] {%s} " + ", ".join(("<%s>",) * len(values)),
+ self.name,
+ *values
+ )
except:
# Don't let logging failures stop SQL from working
pass
- # TODO(paul): Here would be an excellent place to put some timing
- # measurements, and log (warning?) slow queries.
- return object.__getattribute__(self, "txn").execute(
- sql, *args, **kwargs
- )
+ start = time.clock() * 1000
+ try:
+ return self.txn.execute(
+ sql, *args, **kwargs
+ )
+ except:
+ logger.exception("[SQL FAIL] {%s}", self.name)
+ raise
+ finally:
+ end = time.clock() * 1000
+ sql_logger.debug("[SQL time] {%s} %f", self.name, end - start)
class SQLBaseStore(object):
+ _TXN_ID = 0
def __init__(self, hs):
self.hs = hs
@@ -74,10 +86,30 @@ class SQLBaseStore(object):
self.event_factory = hs.get_event_factory()
self._clock = hs.get_clock()
- def runInteraction(self, func, *args, **kwargs):
+ def runInteraction(self, desc, func, *args, **kwargs):
"""Wraps the .runInteraction() method on the underlying db_pool."""
def inner_func(txn, *args, **kwargs):
- return func(LoggingTransaction(txn), *args, **kwargs)
+ start = time.clock() * 1000
+ txn_id = SQLBaseStore._TXN_ID
+
+            # We don't really need these to be unique, so let's stop it from
+            # growing really large.
+            SQLBaseStore._TXN_ID = (SQLBaseStore._TXN_ID + 1) % (sys.maxint - 1)
+
+ name = "%s-%x" % (desc, txn_id, )
+
+ transaction_logger.debug("[TXN START] {%s}", name)
+ try:
+ return func(LoggingTransaction(txn, name), *args, **kwargs)
+ except:
+ logger.exception("[TXN FAIL] {%s}", name)
+ raise
+ finally:
+ end = time.clock() * 1000
+ transaction_logger.debug(
+ "[TXN END] {%s} %f",
+ name, end - start
+ )
return self._db_pool.runInteraction(inner_func, *args, **kwargs)
@@ -113,7 +145,7 @@ class SQLBaseStore(object):
else:
return cursor.fetchall()
- return self.runInteraction(interaction)
+ return self.runInteraction("_execute", interaction)
def _execute_and_decode(self, query, *args):
return self._execute(self.cursor_to_dict, query, *args)
@@ -130,6 +162,7 @@ class SQLBaseStore(object):
or_replace : bool; if True performs an INSERT OR REPLACE
"""
return self.runInteraction(
+ "_simple_insert",
self._simple_insert_txn, table, values, or_replace=or_replace,
or_ignore=or_ignore,
)
@@ -170,7 +203,6 @@ class SQLBaseStore(object):
table, keyvalues, retcols=retcols, allow_none=allow_none
)
- @defer.inlineCallbacks
def _simple_select_one_onecol(self, table, keyvalues, retcol,
allow_none=False):
"""Executes a SELECT query on the named table, which is expected to
@@ -181,19 +213,41 @@ class SQLBaseStore(object):
keyvalues : dict of column names and values to select the row with
retcol : string giving the name of the column to return
"""
- ret = yield self._simple_select_one(
+ return self.runInteraction(
+ "_simple_select_one_onecol_txn",
+ self._simple_select_one_onecol_txn,
+ table, keyvalues, retcol, allow_none=allow_none,
+ )
+
+ def _simple_select_one_onecol_txn(self, txn, table, keyvalues, retcol,
+ allow_none=False):
+ ret = self._simple_select_onecol_txn(
+ txn,
table=table,
keyvalues=keyvalues,
- retcols=[retcol],
- allow_none=allow_none
+ retcol=retcol,
)
if ret:
- defer.returnValue(ret[retcol])
+ return ret[0]
else:
- defer.returnValue(None)
+ if allow_none:
+ return None
+ else:
+ raise StoreError(404, "No row found")
+
+ def _simple_select_onecol_txn(self, txn, table, keyvalues, retcol):
+ sql = "SELECT %(retcol)s FROM %(table)s WHERE %(where)s" % {
+ "retcol": retcol,
+ "table": table,
+ "where": " AND ".join("%s = ?" % k for k in keyvalues.keys()),
+ }
+
+ txn.execute(sql, keyvalues.values())
+
+ return [r[0] for r in txn.fetchall()]
+
- @defer.inlineCallbacks
def _simple_select_onecol(self, table, keyvalues, retcol):
"""Executes a SELECT query on the named table, which returns a list
comprising of the values of the named column from the selected rows.
@@ -206,19 +260,11 @@ class SQLBaseStore(object):
Returns:
Deferred: Results in a list
"""
- sql = "SELECT %(retcol)s FROM %(table)s WHERE %(where)s" % {
- "retcol": retcol,
- "table": table,
- "where": " AND ".join("%s = ?" % k for k in keyvalues.keys()),
- }
-
- def func(txn):
- txn.execute(sql, keyvalues.values())
- return txn.fetchall()
-
- res = yield self.runInteraction(func)
-
- defer.returnValue([r[0] for r in res])
+ return self.runInteraction(
+ "_simple_select_onecol",
+ self._simple_select_onecol_txn,
+ table, keyvalues, retcol
+ )
def _simple_select_list(self, table, keyvalues, retcols):
"""Executes a SELECT query on the named table, which may return zero or
@@ -239,7 +285,7 @@ class SQLBaseStore(object):
txn.execute(sql, keyvalues.values())
return self.cursor_to_dict(txn)
- return self.runInteraction(func)
+ return self.runInteraction("_simple_select_list", func)
def _simple_update_one(self, table, keyvalues, updatevalues,
retcols=None):
@@ -307,7 +353,7 @@ class SQLBaseStore(object):
raise StoreError(500, "More than one row matched")
return ret
- return self.runInteraction(func)
+ return self.runInteraction("_simple_selectupdate_one", func)
def _simple_delete_one(self, table, keyvalues):
"""Executes a DELETE query on the named table, expecting to delete a
@@ -319,7 +365,7 @@ class SQLBaseStore(object):
"""
sql = "DELETE FROM %s WHERE %s" % (
table,
- " AND ".join("%s = ?" % (k) for k in keyvalues)
+ " AND ".join("%s = ?" % (k, ) for k in keyvalues)
)
def func(txn):
@@ -328,7 +374,25 @@ class SQLBaseStore(object):
raise StoreError(404, "No row found")
if txn.rowcount > 1:
raise StoreError(500, "more than one row matched")
- return self.runInteraction(func)
+ return self.runInteraction("_simple_delete_one", func)
+
+ def _simple_delete(self, table, keyvalues):
+ """Executes a DELETE query on the named table.
+
+ Args:
+ table : string giving the table name
+            keyvalues : dict of column names and values to select the rows with
+ """
+
+ return self.runInteraction("_simple_delete", self._simple_delete_txn)
+
+ def _simple_delete_txn(self, txn, table, keyvalues):
+ sql = "DELETE FROM %s WHERE %s" % (
+ table,
+ " AND ".join("%s = ?" % (k, ) for k in keyvalues)
+ )
+
+ return txn.execute(sql, keyvalues.values())
def _simple_max_id(self, table):
"""Executes a SELECT query on the named table, expecting to return the
@@ -346,7 +410,7 @@ class SQLBaseStore(object):
return 0
return max_id
- return self.runInteraction(func)
+ return self.runInteraction("_simple_max_id", func)
def _parse_event_from_row(self, row_dict):
d = copy.deepcopy({k: v for k, v in row_dict.items()})
@@ -370,7 +434,9 @@ class SQLBaseStore(object):
)
def _parse_events(self, rows):
- return self.runInteraction(self._parse_events_txn, rows)
+ return self.runInteraction(
+ "_parse_events", self._parse_events_txn, rows
+ )
def _parse_events_txn(self, txn, rows):
events = [self._parse_event_from_row(r) for r in rows]
@@ -378,6 +444,17 @@ class SQLBaseStore(object):
sql = "SELECT * FROM events WHERE event_id = ?"
for ev in events:
+ signatures = self._get_event_origin_signatures_txn(
+ txn, ev.event_id,
+ )
+
+ ev.signatures = {
+ k: encode_base64(v) for k, v in signatures.items()
+ }
+
+ prev_events = self._get_latest_events_in_room(txn, ev.room_id)
+ ev.prev_events = [(e_id, s,) for e_id, s, _ in prev_events]
+
if hasattr(ev, "prev_state"):
# Load previous state_content.
# TODO: Should we be pulling this out above?
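With `runInteraction` now taking a description as its first argument, every transaction gets a `{desc-txnid}` tag that threads through the `[TXN START]`/`[TXN END]` and `[SQL]` log lines. A hedged sketch of a call site under the new signature (`store` and the query here are illustrative, not part of this patch):

```python
# Hypothetical call site for the new runInteraction(desc, func, ...) shape.
def _count_events_txn(txn, room_id):
    txn.execute("SELECT COUNT(*) FROM events WHERE room_id = ?", (room_id,))
    return txn.fetchone()[0]

# Returns a Deferred; "count_events" becomes the {desc-txnid} tag in the
# synapse.storage.txn and synapse.storage.SQL debug output.
d = store.runInteraction("count_events", _count_events_txn, "!room:hs")
```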
diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py
index 52373a28a6..d6a7113b9c 100644
--- a/synapse/storage/directory.py
+++ b/synapse/storage/directory.py
@@ -95,6 +95,7 @@ class DirectoryStore(SQLBaseStore):
def delete_room_alias(self, room_alias):
return self.runInteraction(
+ "delete_room_alias",
self._delete_room_alias_txn,
room_alias,
)
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
new file mode 100644
index 0000000000..dcc116bad2
--- /dev/null
+++ b/synapse/storage/event_federation.py
@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from syutil.base64util import encode_base64
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class EventFederationStore(SQLBaseStore):
+
+ def get_oldest_events_in_room(self, room_id):
+ return self.runInteraction(
+ "get_oldest_events_in_room",
+ self._get_oldest_events_in_room_txn,
+ room_id,
+ )
+
+ def _get_oldest_events_in_room_txn(self, txn, room_id):
+ return self._simple_select_onecol_txn(
+ txn,
+ table="event_backward_extremities",
+ keyvalues={
+ "room_id": room_id,
+ },
+ retcol="event_id",
+ )
+
+ def get_latest_events_in_room(self, room_id):
+ return self.runInteraction(
+ "get_latest_events_in_room",
+ self._get_latest_events_in_room,
+ room_id,
+ )
+
+ def _get_latest_events_in_room(self, txn, room_id):
+ sql = (
+ "SELECT e.event_id, e.depth FROM events as e "
+ "INNER JOIN event_forward_extremities as f "
+ "ON e.event_id = f.event_id "
+ "WHERE f.room_id = ?"
+ )
+
+ txn.execute(sql, (room_id, ))
+
+ results = []
+ for event_id, depth in txn.fetchall():
+ hashes = self._get_event_reference_hashes_txn(txn, event_id)
+ prev_hashes = {
+ k: encode_base64(v) for k, v in hashes.items()
+ if k == "sha256"
+ }
+ results.append((event_id, prev_hashes, depth))
+
+ return results
+
+ def get_min_depth(self, room_id):
+ return self.runInteraction(
+ "get_min_depth",
+ self._get_min_depth_interaction,
+ room_id,
+ )
+
+ def _get_min_depth_interaction(self, txn, room_id):
+ min_depth = self._simple_select_one_onecol_txn(
+ txn,
+ table="room_depth",
+ keyvalues={"room_id": room_id,},
+ retcol="min_depth",
+ allow_none=True,
+ )
+
+ return int(min_depth) if min_depth is not None else None
+
+ def _update_min_depth_for_room_txn(self, txn, room_id, depth):
+ min_depth = self._get_min_depth_interaction(txn, room_id)
+
+ do_insert = depth < min_depth if min_depth else True
+
+ if do_insert:
+ self._simple_insert_txn(
+ txn,
+ table="room_depth",
+ values={
+ "room_id": room_id,
+ "min_depth": depth,
+ },
+ or_replace=True,
+ )
+
+ def _handle_prev_events(self, txn, outlier, event_id, prev_events,
+ room_id):
+ for e_id, _ in prev_events:
+ # TODO (erikj): This could be done as a bulk insert
+ self._simple_insert_txn(
+ txn,
+ table="event_edges",
+ values={
+ "event_id": event_id,
+ "prev_event_id": e_id,
+ "room_id": room_id,
+ },
+ or_ignore=True,
+ )
+
+ # Update the extremities table if this is not an outlier.
+ if not outlier:
+ for e_id, _ in prev_events:
+ # TODO (erikj): This could be done as a bulk insert
+ self._simple_delete_txn(
+ txn,
+ table="event_forward_extremities",
+ keyvalues={
+ "event_id": e_id,
+ "room_id": room_id,
+ }
+ )
+
+            # We only insert the new event as a forward extremity if no
+            # other events reference it as a prev event.
+ query = (
+ "INSERT OR IGNORE INTO %(table)s (event_id, room_id) "
+ "SELECT ?, ? WHERE NOT EXISTS ("
+ "SELECT 1 FROM %(event_edges)s WHERE "
+ "prev_event_id = ? "
+ ")"
+ ) % {
+ "table": "event_forward_extremities",
+ "event_edges": "event_edges",
+ }
+
+ logger.debug("query: %s", query)
+
+            txn.execute(query, (event_id, room_id, event_id))
+
+            # Insert all the prev_events as backward extremities; they'll
+            # get deleted in a second if they're incorrect anyway.
+            for e_id, _ in prev_events:
+                # TODO (erikj): This could be done as a bulk insert
+                self._simple_insert_txn(
+                    txn,
+                    table="event_backward_extremities",
+                    values={
+                        "event_id": e_id,
+                        "room_id": room_id,
+                    },
+                    or_ignore=True,
+                )
+
+            # Also delete from the backward extremities table all entries
+            # that reference events we have already seen.
+            query = (
+                "DELETE FROM event_backward_extremities WHERE EXISTS ("
+                "SELECT 1 FROM events "
+                "WHERE "
+                "event_backward_extremities.event_id = events.event_id "
+                "AND not events.outlier "
+                ")"
+            )
+            txn.execute(query)
+
+ def get_backfill_events(self, room_id, event_list, limit):
+ """Get a list of Events for a given topic that occured before (and
+ including) the pdus in pdu_list. Return a list of max size `limit`.
+
+ Args:
+ txn
+ room_id (str)
+ event_list (list)
+ limit (int)
+
+ Return:
+ list: A list of PduTuples
+ """
+ return self.runInteraction(
+ "get_backfill_events",
+ self._get_backfill_events, room_id, event_list, limit
+ )
+
+ def _get_backfill_events(self, txn, room_id, event_list, limit):
+ logger.debug(
+ "_get_backfill_events: %s, %s, %s",
+ room_id, repr(event_list), limit
+ )
+
+        # We seed the event_results with the events from the event_list.
+ event_results = event_list
+
+ front = event_list
+
+ query = (
+ "SELECT prev_event_id FROM event_edges "
+ "WHERE room_id = ? AND event_id = ? "
+ "LIMIT ?"
+ )
+
+ # We iterate through all event_ids in `front` to select their previous
+ # events. These are dumped in `new_front`.
+ # We continue until we reach the limit *or* new_front is empty (i.e.,
+        # we've run out of things to select).
+ while front and len(event_results) < limit:
+
+ new_front = []
+ for event_id in front:
+ logger.debug(
+ "_backfill_interaction: id=%s",
+ event_id
+ )
+
+ txn.execute(
+ query,
+ (room_id, event_id, limit - len(event_results))
+ )
+
+ for row in txn.fetchall():
+ logger.debug(
+ "_backfill_interaction: got id=%s",
+ *row
+ )
+                    new_front.append(row[0])
+
+ front = new_front
+ event_results += new_front
+
+        # We also want to update the `prev_events` attributes before returning.
+ return self._get_pdu_tuples(txn, event_results)
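The forward-extremity maintenance in `_handle_prev_events` boils down to: record the edges, drop each prev event from the set of tips, then add the new event as a tip only if nothing already references it as a prev event. A toy in-memory model of that rule (the names here are illustrative, not the real API):

```python
# Toy model of the forward-extremity rule; sets stand in for the
# event_forward_extremities and event_edges tables.
def update_forward_extremities(tips, edges, event_id, prev_event_ids):
    for prev_id in prev_event_ids:
        edges.add((event_id, prev_id))
        tips.discard(prev_id)  # referenced events stop being tips
    # Mirror of the INSERT ... WHERE NOT EXISTS query above.
    if not any(prev == event_id for _, prev in edges):
        tips.add(event_id)

tips, edges = {"$a:hs"}, set()
update_forward_extremities(tips, edges, "$b:hs", ["$a:hs"])
assert tips == {"$b:hs"}
```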
diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py
index 4feb8335ba..fd705138e6 100644
--- a/synapse/storage/keys.py
+++ b/synapse/storage/keys.py
@@ -18,9 +18,10 @@ from _base import SQLBaseStore
from twisted.internet import defer
import OpenSSL
-from syutil.crypto.signing_key import decode_verify_key_bytes
+from syutil.crypto.signing_key import decode_verify_key_bytes
import hashlib
+
class KeyStore(SQLBaseStore):
"""Persistence for signature verification keys and tls X.509 certificates
"""
diff --git a/synapse/storage/pdu.py b/synapse/storage/pdu.py
deleted file mode 100644
index 3a90c382f0..0000000000
--- a/synapse/storage/pdu.py
+++ /dev/null
@@ -1,932 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from twisted.internet import defer
-
-from ._base import SQLBaseStore, Table, JoinHelper
-
-from synapse.federation.units import Pdu
-from synapse.util.logutils import log_function
-
-from syutil.base64util import encode_base64
-
-from collections import namedtuple
-
-import logging
-
-
-logger = logging.getLogger(__name__)
-
-
-class PduStore(SQLBaseStore):
- """A collection of queries for handling PDUs.
- """
-
- def get_pdu(self, pdu_id, origin):
- """Given a pdu_id and origin, get a PDU.
-
- Args:
- txn
- pdu_id (str)
- origin (str)
-
- Returns:
- PduTuple: If the pdu does not exist in the database, returns None
- """
-
- return self.runInteraction(
- self._get_pdu_tuple, pdu_id, origin
- )
-
- def _get_pdu_tuple(self, txn, pdu_id, origin):
- res = self._get_pdu_tuples(txn, [(pdu_id, origin)])
- return res[0] if res else None
-
- def _get_pdu_tuples(self, txn, pdu_id_tuples):
- results = []
- for pdu_id, origin in pdu_id_tuples:
- txn.execute(
- PduEdgesTable.select_statement("pdu_id = ? AND origin = ?"),
- (pdu_id, origin)
- )
-
- edges = [
- (r.prev_pdu_id, r.prev_origin)
- for r in PduEdgesTable.decode_results(txn.fetchall())
- ]
-
- edge_hashes = self._get_prev_pdu_hashes_txn(txn, pdu_id, origin)
-
- hashes = self._get_pdu_content_hashes_txn(txn, pdu_id, origin)
- signatures = self._get_pdu_origin_signatures_txn(
- txn, pdu_id, origin
- )
-
- query = (
- "SELECT %(fields)s FROM %(pdus)s as p "
- "LEFT JOIN %(state)s as s "
- "ON p.pdu_id = s.pdu_id AND p.origin = s.origin "
- "WHERE p.pdu_id = ? AND p.origin = ? "
- ) % {
- "fields": _pdu_state_joiner.get_fields(
- PdusTable="p", StatePdusTable="s"),
- "pdus": PdusTable.table_name,
- "state": StatePdusTable.table_name,
- }
-
- txn.execute(query, (pdu_id, origin))
-
- row = txn.fetchone()
- if row:
- results.append(PduTuple(
- PduEntry(*row), edges, hashes, signatures, edge_hashes
- ))
-
- return results
-
- def get_current_state_for_context(self, context):
- """Get a list of PDUs that represent the current state for a given
- context
-
- Args:
- context (str)
-
- Returns:
- list: A list of PduTuples
- """
-
- return self.runInteraction(
- self._get_current_state_for_context,
- context
- )
-
- def _get_current_state_for_context(self, txn, context):
- query = (
- "SELECT pdu_id, origin FROM %s WHERE context = ?"
- % CurrentStateTable.table_name
- )
-
- logger.debug("get_current_state %s, Args=%s", query, context)
- txn.execute(query, (context,))
-
- res = txn.fetchall()
-
- logger.debug("get_current_state %d results", len(res))
-
- return self._get_pdu_tuples(txn, res)
-
- def _persist_pdu_txn(self, txn, prev_pdus, cols):
- """Inserts a (non-state) PDU into the database.
-
- Args:
- txn,
- prev_pdus (list)
- **cols: The columns to insert into the PdusTable.
- """
- entry = PdusTable.EntryType(
- **{k: cols.get(k, None) for k in PdusTable.fields}
- )
-
- txn.execute(PdusTable.insert_statement(), entry)
-
- self._handle_prev_pdus(
- txn, entry.outlier, entry.pdu_id, entry.origin,
- prev_pdus, entry.context
- )
-
- def mark_pdu_as_processed(self, pdu_id, pdu_origin):
- """Mark a received PDU as processed.
-
- Args:
- txn
- pdu_id (str)
- pdu_origin (str)
- """
-
- return self.runInteraction(
- self._mark_as_processed, pdu_id, pdu_origin
- )
-
- def _mark_as_processed(self, txn, pdu_id, pdu_origin):
- txn.execute("UPDATE %s SET have_processed = 1" % PdusTable.table_name)
-
- def get_all_pdus_from_context(self, context):
- """Get a list of all PDUs for a given context."""
- return self.runInteraction(
- self._get_all_pdus_from_context, context,
- )
-
- def _get_all_pdus_from_context(self, txn, context):
- query = (
- "SELECT pdu_id, origin FROM %s "
- "WHERE context = ?"
- ) % PdusTable.table_name
-
- txn.execute(query, (context,))
-
- return self._get_pdu_tuples(txn, txn.fetchall())
-
- def get_backfill(self, context, pdu_list, limit):
- """Get a list of Pdus for a given topic that occured before (and
- including) the pdus in pdu_list. Return a list of max size `limit`.
-
- Args:
- txn
- context (str)
- pdu_list (list)
- limit (int)
-
- Return:
- list: A list of PduTuples
- """
- return self.runInteraction(
- self._get_backfill, context, pdu_list, limit
- )
-
- def _get_backfill(self, txn, context, pdu_list, limit):
- logger.debug(
- "backfill: %s, %s, %s",
- context, repr(pdu_list), limit
- )
-
- # We seed the pdu_results with the things from the pdu_list.
- pdu_results = pdu_list
-
- front = pdu_list
-
- query = (
- "SELECT prev_pdu_id, prev_origin FROM %(edges_table)s "
- "WHERE context = ? AND pdu_id = ? AND origin = ? "
- "LIMIT ?"
- ) % {
- "edges_table": PduEdgesTable.table_name,
- }
-
- # We iterate through all pdu_ids in `front` to select their previous
- # pdus. These are dumped in `new_front`. We continue until we reach the
- # limit *or* new_front is empty (i.e., we've run out of things to
- # select
- while front and len(pdu_results) < limit:
-
- new_front = []
- for pdu_id, origin in front:
- logger.debug(
- "_backfill_interaction: i=%s, o=%s",
- pdu_id, origin
- )
-
- txn.execute(
- query,
- (context, pdu_id, origin, limit - len(pdu_results))
- )
-
- for row in txn.fetchall():
- logger.debug(
- "_backfill_interaction: got i=%s, o=%s",
- *row
- )
- new_front.append(row)
-
- front = new_front
- pdu_results += new_front
-
- # We also want to update the `prev_pdus` attributes before returning.
- return self._get_pdu_tuples(txn, pdu_results)
-
- def get_min_depth_for_context(self, context):
- """Get the current minimum depth for a context
-
- Args:
- txn
- context (str)
- """
- return self.runInteraction(
- self._get_min_depth_for_context, context
- )
-
- def _get_min_depth_for_context(self, txn, context):
- return self._get_min_depth_interaction(txn, context)
-
- def _get_min_depth_interaction(self, txn, context):
- txn.execute(
- "SELECT min_depth FROM %s WHERE context = ?"
- % ContextDepthTable.table_name,
- (context,)
- )
-
- row = txn.fetchone()
-
- return row[0] if row else None
-
- def _update_min_depth_for_context_txn(self, txn, context, depth):
- """Update the minimum `depth` of the given context, which is the line
- on which we stop backfilling backwards.
-
- Args:
- context (str)
- depth (int)
- """
- min_depth = self._get_min_depth_interaction(txn, context)
-
- do_insert = depth < min_depth if min_depth else True
-
- if do_insert:
- txn.execute(
- "INSERT OR REPLACE INTO %s (context, min_depth) "
- "VALUES (?,?)" % ContextDepthTable.table_name,
- (context, depth)
- )
-
- def _get_latest_pdus_in_context(self, txn, context):
- """Get's a list of the most current pdus for a given context. This is
- used when we are sending a Pdu and need to fill out the `prev_pdus`
- key
-
- Args:
- txn
- context
- """
- query = (
- "SELECT p.pdu_id, p.origin, p.depth FROM %(pdus)s as p "
- "INNER JOIN %(forward)s as f ON p.pdu_id = f.pdu_id "
- "AND f.origin = p.origin "
- "WHERE f.context = ?"
- ) % {
- "pdus": PdusTable.table_name,
- "forward": PduForwardExtremitiesTable.table_name,
- }
-
- logger.debug("get_prev query: %s", query)
-
- txn.execute(
- query,
- (context, )
- )
-
- results = []
- for pdu_id, origin, depth in txn.fetchall():
- hashes = self._get_pdu_reference_hashes_txn(txn, pdu_id, origin)
- sha256_bytes = hashes["sha256"]
- prev_hashes = {"sha256": encode_base64(sha256_bytes)}
- results.append((pdu_id, origin, prev_hashes, depth))
-
- return results
-
- @defer.inlineCallbacks
- def get_oldest_pdus_in_context(self, context):
- """Get a list of Pdus that we haven't backfilled beyond yet (and havent
- seen). This list is used when we want to backfill backwards and is the
- list we send to the remote server.
-
- Args:
- txn
- context (str)
-
- Returns:
- list: A list of PduIdTuple.
- """
- results = yield self._execute(
- None,
- "SELECT pdu_id, origin FROM %(back)s WHERE context = ?"
- % {"back": PduBackwardExtremitiesTable.table_name, },
- context
- )
-
- defer.returnValue([PduIdTuple(i, o) for i, o in results])
-
- def is_pdu_new(self, pdu_id, origin, context, depth):
- """For a given Pdu, try and figure out if it's 'new', i.e., if it's
- not something we got randomly from the past, for example when we
- request the current state of the room that will probably return a bunch
- of pdus from before we joined.
-
- Args:
- txn
- pdu_id (str)
- origin (str)
- context (str)
- depth (int)
-
- Returns:
- bool
- """
-
- return self.runInteraction(
- self._is_pdu_new,
- pdu_id=pdu_id,
- origin=origin,
- context=context,
- depth=depth
- )
-
- def _is_pdu_new(self, txn, pdu_id, origin, context, depth):
- # If depth > min depth in back table, then we classify it as new.
- # OR if there is nothing in the back table, then it kinda needs to
- # be a new thing.
- query = (
- "SELECT min(p.depth) FROM %(edges)s as e "
- "INNER JOIN %(back)s as b "
- "ON e.prev_pdu_id = b.pdu_id AND e.prev_origin = b.origin "
- "INNER JOIN %(pdus)s as p "
- "ON e.pdu_id = p.pdu_id AND p.origin = e.origin "
- "WHERE p.context = ?"
- ) % {
- "pdus": PdusTable.table_name,
- "edges": PduEdgesTable.table_name,
- "back": PduBackwardExtremitiesTable.table_name,
- }
-
- txn.execute(query, (context,))
-
- min_depth, = txn.fetchone()
-
- if not min_depth or depth > int(min_depth):
- logger.debug(
- "is_new true: id=%s, o=%s, d=%s min_depth=%s",
- pdu_id, origin, depth, min_depth
- )
- return True
-
- # If this pdu is in the forwards table, then it also is a new one
- query = (
- "SELECT * FROM %(forward)s WHERE pdu_id = ? AND origin = ?"
- ) % {
- "forward": PduForwardExtremitiesTable.table_name,
- }
-
- txn.execute(query, (pdu_id, origin))
-
- # Did we get anything?
- if txn.fetchall():
- logger.debug(
- "is_new true: id=%s, o=%s, d=%s was forward",
- pdu_id, origin, depth
- )
- return True
-
- logger.debug(
- "is_new false: id=%s, o=%s, d=%s",
- pdu_id, origin, depth
- )
-
- # FINE THEN. It's probably old.
- return False
-
- @staticmethod
- @log_function
- def _handle_prev_pdus(txn, outlier, pdu_id, origin, prev_pdus,
- context):
- txn.executemany(
- PduEdgesTable.insert_statement(),
- [(pdu_id, origin, p[0], p[1], context) for p in prev_pdus]
- )
-
- # Update the extremities table if this is not an outlier.
- if not outlier:
-
- # First, we delete the new one from the forwards extremities table.
- query = (
- "DELETE FROM %s WHERE pdu_id = ? AND origin = ?"
- % PduForwardExtremitiesTable.table_name
- )
- txn.executemany(query, list(p[:2] for p in prev_pdus))
-
- # We only insert as a forward extremety the new pdu if there are no
- # other pdus that reference it as a prev pdu
- query = (
- "INSERT INTO %(table)s (pdu_id, origin, context) "
- "SELECT ?, ?, ? WHERE NOT EXISTS ("
- "SELECT 1 FROM %(pdu_edges)s WHERE "
- "prev_pdu_id = ? AND prev_origin = ?"
- ")"
- ) % {
- "table": PduForwardExtremitiesTable.table_name,
- "pdu_edges": PduEdgesTable.table_name
- }
-
- logger.debug("query: %s", query)
-
- txn.execute(query, (pdu_id, origin, context, pdu_id, origin))
-
- # Insert all the prev_pdus as a backwards thing, they'll get
- # deleted in a second if they're incorrect anyway.
- txn.executemany(
- PduBackwardExtremitiesTable.insert_statement(),
- [(i, o, context) for i, o, _ in prev_pdus]
- )
-
- # Also delete from the backwards extremities table all ones that
- # reference pdus that we have already seen
- query = (
- "DELETE FROM %(pdu_back)s WHERE EXISTS ("
- "SELECT 1 FROM %(pdus)s AS pdus "
- "WHERE "
- "%(pdu_back)s.pdu_id = pdus.pdu_id "
- "AND %(pdu_back)s.origin = pdus.origin "
- "AND not pdus.outlier "
- ")"
- ) % {
- "pdu_back": PduBackwardExtremitiesTable.table_name,
- "pdus": PdusTable.table_name,
- }
- txn.execute(query)
-
-
-class StatePduStore(SQLBaseStore):
- """A collection of queries for handling state PDUs.
- """
-
- def _persist_state_txn(self, txn, prev_pdus, cols):
- """Inserts a state PDU into the database
-
- Args:
- txn,
- prev_pdus (list)
- **cols: The columns to insert into the PdusTable and StatePdusTable
- """
- pdu_entry = PdusTable.EntryType(
- **{k: cols.get(k, None) for k in PdusTable.fields}
- )
- state_entry = StatePdusTable.EntryType(
- **{k: cols.get(k, None) for k in StatePdusTable.fields}
- )
-
- logger.debug("Inserting pdu: %s", repr(pdu_entry))
- logger.debug("Inserting state: %s", repr(state_entry))
-
- txn.execute(PdusTable.insert_statement(), pdu_entry)
- txn.execute(StatePdusTable.insert_statement(), state_entry)
-
- self._handle_prev_pdus(
- txn,
- pdu_entry.outlier, pdu_entry.pdu_id, pdu_entry.origin, prev_pdus,
- pdu_entry.context
- )
-
- def get_unresolved_state_tree(self, new_state_pdu):
- return self.runInteraction(
- self._get_unresolved_state_tree, new_state_pdu
- )
-
- @log_function
- def _get_unresolved_state_tree(self, txn, new_pdu):
- current = self._get_current_interaction(
- txn,
- new_pdu.context, new_pdu.pdu_type, new_pdu.state_key
- )
-
- ReturnType = namedtuple(
- "StateReturnType", ["new_branch", "current_branch"]
- )
- return_value = ReturnType([new_pdu], [])
-
- if not current:
- logger.debug("get_unresolved_state_tree No current state.")
- return (return_value, None)
-
- return_value.current_branch.append(current)
-
- enum_branches = self._enumerate_state_branches(
- txn, new_pdu, current
- )
-
- missing_branch = None
- for branch, prev_state, state in enum_branches:
- if state:
- return_value[branch].append(state)
- else:
- # We don't have prev_state :(
- missing_branch = branch
- break
-
- return (return_value, missing_branch)
-
- def update_current_state(self, pdu_id, origin, context, pdu_type,
- state_key):
- return self.runInteraction(
- self._update_current_state,
- pdu_id, origin, context, pdu_type, state_key
- )
-
- def _update_current_state(self, txn, pdu_id, origin, context, pdu_type,
- state_key):
- query = (
- "INSERT OR REPLACE INTO %(curr)s (%(fields)s) VALUES (%(qs)s)"
- ) % {
- "curr": CurrentStateTable.table_name,
- "fields": CurrentStateTable.get_fields_string(),
- "qs": ", ".join(["?"] * len(CurrentStateTable.fields))
- }
-
- query_args = CurrentStateTable.EntryType(
- pdu_id=pdu_id,
- origin=origin,
- context=context,
- pdu_type=pdu_type,
- state_key=state_key
- )
-
- txn.execute(query, query_args)
-
- def get_current_state_pdu(self, context, pdu_type, state_key):
- """For a given context, pdu_type, state_key 3-tuple, return what is
- currently considered the current state.
-
- Args:
- txn
- context (str)
- pdu_type (str)
- state_key (str)
-
- Returns:
- PduEntry
- """
-
- return self.runInteraction(
- self._get_current_state_pdu, context, pdu_type, state_key
- )
-
- def _get_current_state_pdu(self, txn, context, pdu_type, state_key):
- return self._get_current_interaction(txn, context, pdu_type, state_key)
-
- def _get_current_interaction(self, txn, context, pdu_type, state_key):
- logger.debug(
- "_get_current_interaction %s %s %s",
- context, pdu_type, state_key
- )
-
- fields = _pdu_state_joiner.get_fields(
- PdusTable="p", StatePdusTable="s")
-
- current_query = (
- "SELECT %(fields)s FROM %(state)s as s "
- "INNER JOIN %(pdus)s as p "
- "ON s.pdu_id = p.pdu_id AND s.origin = p.origin "
- "INNER JOIN %(curr)s as c "
- "ON s.pdu_id = c.pdu_id AND s.origin = c.origin "
- "WHERE s.context = ? AND s.pdu_type = ? AND s.state_key = ? "
- ) % {
- "fields": fields,
- "curr": CurrentStateTable.table_name,
- "state": StatePdusTable.table_name,
- "pdus": PdusTable.table_name,
- }
-
- txn.execute(
- current_query,
- (context, pdu_type, state_key)
- )
-
- row = txn.fetchone()
-
- result = PduEntry(*row) if row else None
-
- if not result:
- logger.debug("_get_current_interaction not found")
- else:
- logger.debug(
- "_get_current_interaction found %s %s",
- result.pdu_id, result.origin
- )
-
- return result
-
- def handle_new_state(self, new_pdu):
- """Actually perform conflict resolution on the new_pdu on the
- assumption we have all the pdus required to perform it.
-
- Args:
- new_pdu
-
- Returns:
- bool: True if the new_pdu clobbered the current state, False if not
- """
- return self.runInteraction(
- self._handle_new_state, new_pdu
- )
-
- def _handle_new_state(self, txn, new_pdu):
- logger.debug(
- "handle_new_state %s %s",
- new_pdu.pdu_id, new_pdu.origin
- )
-
- current = self._get_current_interaction(
- txn,
- new_pdu.context, new_pdu.pdu_type, new_pdu.state_key
- )
-
- is_current = False
-
- if (not current or not current.prev_state_id
- or not current.prev_state_origin):
- # Oh, we don't have any state for this yet.
- is_current = True
- elif (current.pdu_id == new_pdu.prev_state_id
- and current.origin == new_pdu.prev_state_origin):
- # Oh! A direct clobber. Just do it.
- is_current = True
- else:
- ##
- # Ok, now loop through until we get to a common ancestor.
- max_new = int(new_pdu.power_level)
- max_current = int(current.power_level)
-
- enum_branches = self._enumerate_state_branches(
- txn, new_pdu, current
- )
- for branch, prev_state, state in enum_branches:
- if not state:
- raise RuntimeError(
- "Could not find state_pdu %s %s" %
- (
- prev_state.prev_state_id,
- prev_state.prev_state_origin
- )
- )
-
- if branch == 0:
- max_new = max(int(state.depth), max_new)
- else:
- max_current = max(int(state.depth), max_current)
-
- is_current = max_new > max_current
-
- if is_current:
- logger.debug("handle_new_state make current")
-
- # Right, this is a new thing, so woo, just insert it.
- txn.execute(
- "INSERT OR REPLACE INTO %(curr)s (%(fields)s) VALUES (%(qs)s)"
- % {
- "curr": CurrentStateTable.table_name,
- "fields": CurrentStateTable.get_fields_string(),
- "qs": ", ".join(["?"] * len(CurrentStateTable.fields))
- },
- CurrentStateTable.EntryType(
- *(new_pdu.__dict__[k] for k in CurrentStateTable.fields)
- )
- )
- else:
- logger.debug("handle_new_state not current")
-
- logger.debug("handle_new_state done")
-
- return is_current
-
- @log_function
- def _enumerate_state_branches(self, txn, pdu_a, pdu_b):
- branch_a = pdu_a
- branch_b = pdu_b
-
- while True:
- if (branch_a.pdu_id == branch_b.pdu_id
- and branch_a.origin == branch_b.origin):
- # Woo! We found a common ancestor
- logger.debug("_enumerate_state_branches Found common ancestor")
- break
-
- do_branch_a = (
- hasattr(branch_a, "prev_state_id") and
- branch_a.prev_state_id
- )
-
- do_branch_b = (
- hasattr(branch_b, "prev_state_id") and
- branch_b.prev_state_id
- )
-
- logger.debug(
- "do_branch_a=%s, do_branch_b=%s",
- do_branch_a, do_branch_b
- )
-
- if do_branch_a and do_branch_b:
- do_branch_a = int(branch_a.depth) > int(branch_b.depth)
-
- if do_branch_a:
- pdu_tuple = PduIdTuple(
- branch_a.prev_state_id,
- branch_a.prev_state_origin
- )
-
- prev_branch = branch_a
-
- logger.debug("getting branch_a prev %s", pdu_tuple)
- branch_a = self._get_pdu_tuple(txn, *pdu_tuple)
- if branch_a:
- branch_a = Pdu.from_pdu_tuple(branch_a)
-
- logger.debug("branch_a=%s", branch_a)
-
- yield (0, prev_branch, branch_a)
-
- if not branch_a:
- break
- elif do_branch_b:
- pdu_tuple = PduIdTuple(
- branch_b.prev_state_id,
- branch_b.prev_state_origin
- )
-
- prev_branch = branch_b
-
- logger.debug("getting branch_b prev %s", pdu_tuple)
- branch_b = self._get_pdu_tuple(txn, *pdu_tuple)
- if branch_b:
- branch_b = Pdu.from_pdu_tuple(branch_b)
-
- logger.debug("branch_b=%s", branch_b)
-
- yield (1, prev_branch, branch_b)
-
- if not branch_b:
- break
- else:
- break
-
-
-class PdusTable(Table):
- table_name = "pdus"
-
- fields = [
- "pdu_id",
- "origin",
- "context",
- "pdu_type",
- "ts",
- "depth",
- "is_state",
- "content_json",
- "unrecognized_keys",
- "outlier",
- "have_processed",
- ]
-
- EntryType = namedtuple("PdusEntry", fields)
-
-
-class PduDestinationsTable(Table):
- table_name = "pdu_destinations"
-
- fields = [
- "pdu_id",
- "origin",
- "destination",
- "delivered_ts",
- ]
-
- EntryType = namedtuple("PduDestinationsEntry", fields)
-
-
-class PduEdgesTable(Table):
- table_name = "pdu_edges"
-
- fields = [
- "pdu_id",
- "origin",
- "prev_pdu_id",
- "prev_origin",
- "context"
- ]
-
- EntryType = namedtuple("PduEdgesEntry", fields)
-
-
-class PduForwardExtremitiesTable(Table):
- table_name = "pdu_forward_extremities"
-
- fields = [
- "pdu_id",
- "origin",
- "context",
- ]
-
- EntryType = namedtuple("PduForwardExtremitiesEntry", fields)
-
-
-class PduBackwardExtremitiesTable(Table):
- table_name = "pdu_backward_extremities"
-
- fields = [
- "pdu_id",
- "origin",
- "context",
- ]
-
- EntryType = namedtuple("PduBackwardExtremitiesEntry", fields)
-
-
-class ContextDepthTable(Table):
- table_name = "context_depth"
-
- fields = [
- "context",
- "min_depth",
- ]
-
- EntryType = namedtuple("ContextDepthEntry", fields)
-
-
-class StatePdusTable(Table):
- table_name = "state_pdus"
-
- fields = [
- "pdu_id",
- "origin",
- "context",
- "pdu_type",
- "state_key",
- "power_level",
- "prev_state_id",
- "prev_state_origin",
- ]
-
- EntryType = namedtuple("StatePdusEntry", fields)
-
-
-class CurrentStateTable(Table):
- table_name = "current_state"
-
- fields = [
- "pdu_id",
- "origin",
- "context",
- "pdu_type",
- "state_key",
- ]
-
- EntryType = namedtuple("CurrentStateEntry", fields)
-
-_pdu_state_joiner = JoinHelper(PdusTable, StatePdusTable)
-
-
-# TODO: These should probably be put somewhere more sensible
-PduIdTuple = namedtuple("PduIdTuple", ("pdu_id", "origin"))
-
-PduEntry = _pdu_state_joiner.EntryType
-""" We are always interested in the join of the PdusTable and StatePdusTable,
-rather than just the PdusTable.
-
-This does not include a prev_pdus key.
-"""
-
-PduTuple = namedtuple(
- "PduTuple",
- ("pdu_entry", "prev_pdu_list", "hashes", "signatures", "edge_hashes")
-)
-""" This is a tuple of a `PduEntry` and a list of `PduIdTuple` that represent
-the `prev_pdus` key of a PDU.
-"""
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 719806f82b..a2ca6f9a69 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -62,8 +62,10 @@ class RegistrationStore(SQLBaseStore):
Raises:
StoreError if the user_id could not be registered.
"""
- yield self.runInteraction(self._register, user_id, token,
- password_hash)
+ yield self.runInteraction(
+ "register",
+ self._register, user_id, token, password_hash
+ )
def _register(self, txn, user_id, token, password_hash):
now = int(self.clock.time())
@@ -100,6 +102,7 @@ class RegistrationStore(SQLBaseStore):
StoreError if no user was found.
"""
return self.runInteraction(
+ "get_user_by_token",
self._query_for_auth,
token
)
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index 8cd46334cf..7e48ce9cc3 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -150,6 +150,7 @@ class RoomStore(SQLBaseStore):
def get_power_level(self, room_id, user_id):
return self.runInteraction(
+ "get_power_level",
self._get_power_level,
room_id, user_id,
)
@@ -183,6 +184,7 @@ class RoomStore(SQLBaseStore):
def get_ops_levels(self, room_id):
return self.runInteraction(
+ "get_ops_levels",
self._get_ops_levels,
room_id,
)
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index ceeef5880e..93329703a2 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -33,7 +33,9 @@ class RoomMemberStore(SQLBaseStore):
target_user_id = event.state_key
domain = self.hs.parse_userid(target_user_id).domain
except:
- logger.exception("Failed to parse target_user_id=%s", target_user_id)
+ logger.exception(
+ "Failed to parse target_user_id=%s", target_user_id
+ )
raise
logger.debug(
@@ -65,7 +67,8 @@ class RoomMemberStore(SQLBaseStore):
# Check if this was the last person to have left.
member_events = self._get_members_query_txn(
txn,
- where_clause="c.room_id = ? AND m.membership = ? AND m.user_id != ?",
+ where_clause=("c.room_id = ? AND m.membership = ?"
+ " AND m.user_id != ?"),
where_values=(event.room_id, Membership.JOIN, target_user_id,)
)
@@ -120,7 +123,6 @@ class RoomMemberStore(SQLBaseStore):
else:
return None
-
def get_room_members(self, room_id, membership=None):
"""Retrieve the current room member list for a room.
diff --git a/synapse/storage/schema/edge_pdus.sql b/synapse/storage/schema/edge_pdus.sql
deleted file mode 100644
index 8a00868065..0000000000
--- a/synapse/storage/schema/edge_pdus.sql
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright 2014 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-CREATE TABLE IF NOT EXISTS context_edge_pdus(
- id INTEGER PRIMARY KEY AUTOINCREMENT, -- twistar requires this
- pdu_id TEXT,
- origin TEXT,
- context TEXT,
- CONSTRAINT context_edge_pdu_id_origin UNIQUE (pdu_id, origin)
-);
-
-CREATE TABLE IF NOT EXISTS origin_edge_pdus(
- id INTEGER PRIMARY KEY AUTOINCREMENT, -- twistar requires this
- pdu_id TEXT,
- origin TEXT,
- CONSTRAINT origin_edge_pdu_id_origin UNIQUE (pdu_id, origin)
-);
-
-CREATE INDEX IF NOT EXISTS context_edge_pdu_id ON context_edge_pdus(pdu_id, origin);
-CREATE INDEX IF NOT EXISTS origin_edge_pdu_id ON origin_edge_pdus(pdu_id, origin);
diff --git a/synapse/storage/schema/event_edges.sql b/synapse/storage/schema/event_edges.sql
new file mode 100644
index 0000000000..e5f768c705
--- /dev/null
+++ b/synapse/storage/schema/event_edges.sql
@@ -0,0 +1,49 @@
+
+CREATE TABLE IF NOT EXISTS event_forward_extremities(
+ event_id TEXT,
+ room_id TEXT,
+ CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE
+);
+
+CREATE INDEX IF NOT EXISTS ev_extrem_room ON event_forward_extremities(room_id);
+CREATE INDEX IF NOT EXISTS ev_extrem_id ON event_forward_extremities(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_backward_extremities(
+ event_id TEXT,
+ room_id TEXT,
+ CONSTRAINT uniqueness UNIQUE (event_id, room_id) ON CONFLICT REPLACE
+);
+
+CREATE INDEX IF NOT EXISTS ev_b_extrem_room ON event_backward_extremities(room_id);
+CREATE INDEX IF NOT EXISTS ev_b_extrem_id ON event_backward_extremities(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_edges(
+ event_id TEXT,
+ prev_event_id TEXT,
+ room_id TEXT,
+ CONSTRAINT uniqueness UNIQUE (event_id, prev_event_id, room_id)
+);
+
+CREATE INDEX IF NOT EXISTS ev_edges_id ON event_edges(event_id);
+CREATE INDEX IF NOT EXISTS ev_edges_prev_id ON event_edges(prev_event_id);
+
+
+CREATE TABLE IF NOT EXISTS room_depth(
+ room_id TEXT,
+ min_depth INTEGER,
+ CONSTRAINT uniqueness UNIQUE (room_id)
+);
+
+CREATE INDEX IF NOT EXISTS room_depth_room ON room_depth(room_id);
+
+
+CREATE TABLE IF NOT EXISTS event_destinations(
+ event_id TEXT,
+ destination TEXT,
+    delivered_ts INTEGER DEFAULT 0, -- delivery timestamp, or 0 if not delivered
+ CONSTRAINT uniqueness UNIQUE (event_id, destination) ON CONFLICT REPLACE
+);
+
+CREATE INDEX IF NOT EXISTS event_destinations_id ON event_destinations(event_id);
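The `ON CONFLICT REPLACE` constraints above are what make the extremity inserts idempotent, so `_handle_prev_events` can safely re-run over events it has seen before. A quick standalone check, assuming the schema file is readable at the path below:

```python
# Hypothetical smoke test of the ON CONFLICT REPLACE behaviour.
import sqlite3

conn = sqlite3.connect(":memory:")
with open("synapse/storage/schema/event_edges.sql") as f:
    conn.executescript(f.read())

for _ in range(2):  # inserting the same pair twice leaves one row
    conn.execute(
        "INSERT INTO event_forward_extremities (event_id, room_id)"
        " VALUES (?, ?)",
        ("$abc:hs", "!room:hs"),
    )
count, = conn.execute(
    "SELECT COUNT(*) FROM event_forward_extremities"
).fetchone()
assert count == 1
```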
diff --git a/synapse/storage/schema/event_signatures.sql b/synapse/storage/schema/event_signatures.sql
new file mode 100644
index 0000000000..5491c7ecec
--- /dev/null
+++ b/synapse/storage/schema/event_signatures.sql
@@ -0,0 +1,65 @@
+/* Copyright 2014 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS event_content_hashes (
+ event_id TEXT,
+ algorithm TEXT,
+ hash BLOB,
+ CONSTRAINT uniqueness UNIQUE (event_id, algorithm)
+);
+
+CREATE INDEX IF NOT EXISTS event_content_hashes_id ON event_content_hashes(
+ event_id
+);
+
+
+CREATE TABLE IF NOT EXISTS event_reference_hashes (
+ event_id TEXT,
+ algorithm TEXT,
+ hash BLOB,
+ CONSTRAINT uniqueness UNIQUE (event_id, algorithm)
+);
+
+CREATE INDEX IF NOT EXISTS event_reference_hashes_id ON event_reference_hashes (
+ event_id
+);
+
+
+CREATE TABLE IF NOT EXISTS event_origin_signatures (
+ event_id TEXT,
+ origin TEXT,
+ key_id TEXT,
+ signature BLOB,
+ CONSTRAINT uniqueness UNIQUE (event_id, key_id)
+);
+
+CREATE INDEX IF NOT EXISTS event_origin_signatures_id ON event_origin_signatures (
+ event_id
+);
+
+
+CREATE TABLE IF NOT EXISTS event_edge_hashes(
+ event_id TEXT,
+ prev_event_id TEXT,
+ algorithm TEXT,
+ hash BLOB,
+ CONSTRAINT uniqueness UNIQUE (
+ event_id, prev_event_id, algorithm
+ )
+);
+
+CREATE INDEX IF NOT EXISTS event_edge_hashes_id ON event_edge_hashes(
+ event_id
+);
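These tables hold raw bytes in their BLOB columns, while events carry the same values base64-encoded: the persist path calls `decode_base64` on the way in, and `_parse_events_txn` calls `encode_base64` on the way out. A minimal round-trip sketch using the syutil helpers already imported in this patch:

```python
# Sketch of the base64 round-trip around the hash/signature tables.
from syutil.base64util import decode_base64, encode_base64

raw = b"some hash or signature bytes"  # what the BLOB column stores
wire = encode_base64(raw)              # what the event JSON carries
assert decode_base64(wire) == raw
```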
diff --git a/synapse/storage/schema/im.sql b/synapse/storage/schema/im.sql
index 3aa83f5c8c..8d6f655993 100644
--- a/synapse/storage/schema/im.sql
+++ b/synapse/storage/schema/im.sql
@@ -23,6 +23,7 @@ CREATE TABLE IF NOT EXISTS events(
unrecognized_keys TEXT,
processed BOOL NOT NULL,
outlier BOOL NOT NULL,
+ depth INTEGER DEFAULT 0 NOT NULL,
CONSTRAINT ev_uniq UNIQUE (event_id)
);
diff --git a/synapse/storage/schema/pdu.sql b/synapse/storage/schema/pdu.sql
deleted file mode 100644
index 16e111a56c..0000000000
--- a/synapse/storage/schema/pdu.sql
+++ /dev/null
@@ -1,106 +0,0 @@
-/* Copyright 2014 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--- Stores pdus and their content
-CREATE TABLE IF NOT EXISTS pdus(
- pdu_id TEXT,
- origin TEXT,
- context TEXT,
- pdu_type TEXT,
- ts INTEGER,
- depth INTEGER DEFAULT 0 NOT NULL,
- is_state BOOL,
- content_json TEXT,
- unrecognized_keys TEXT,
- outlier BOOL NOT NULL,
- have_processed BOOL,
- CONSTRAINT pdu_id_origin UNIQUE (pdu_id, origin)
-);
-
--- Stores what the current state pdu is for a given (context, pdu_type, key) tuple
-CREATE TABLE IF NOT EXISTS state_pdus(
- pdu_id TEXT,
- origin TEXT,
- context TEXT,
- pdu_type TEXT,
- state_key TEXT,
- power_level TEXT,
- prev_state_id TEXT,
- prev_state_origin TEXT,
- CONSTRAINT pdu_id_origin UNIQUE (pdu_id, origin)
- CONSTRAINT prev_pdu_id_origin UNIQUE (prev_state_id, prev_state_origin)
-);
-
-CREATE TABLE IF NOT EXISTS current_state(
- pdu_id TEXT,
- origin TEXT,
- context TEXT,
- pdu_type TEXT,
- state_key TEXT,
- CONSTRAINT pdu_id_origin UNIQUE (pdu_id, origin)
- CONSTRAINT uniqueness UNIQUE (context, pdu_type, state_key) ON CONFLICT REPLACE
-);
-
--- Stores where each pdu we want to send should be sent and the delivery status.
-create TABLE IF NOT EXISTS pdu_destinations(
- pdu_id TEXT,
- origin TEXT,
- destination TEXT,
- delivered_ts INTEGER DEFAULT 0, -- or 0 if not delivered
- CONSTRAINT uniqueness UNIQUE (pdu_id, origin, destination) ON CONFLICT REPLACE
-);
-
-CREATE TABLE IF NOT EXISTS pdu_forward_extremities(
- pdu_id TEXT,
- origin TEXT,
- context TEXT,
- CONSTRAINT uniqueness UNIQUE (pdu_id, origin, context) ON CONFLICT REPLACE
-);
-
-CREATE TABLE IF NOT EXISTS pdu_backward_extremities(
- pdu_id TEXT,
- origin TEXT,
- context TEXT,
- CONSTRAINT uniqueness UNIQUE (pdu_id, origin, context) ON CONFLICT REPLACE
-);
-
-CREATE TABLE IF NOT EXISTS pdu_edges(
- pdu_id TEXT,
- origin TEXT,
- prev_pdu_id TEXT,
- prev_origin TEXT,
- context TEXT,
- CONSTRAINT uniqueness UNIQUE (pdu_id, origin, prev_pdu_id, prev_origin, context)
-);
-
-CREATE TABLE IF NOT EXISTS context_depth(
- context TEXT,
- min_depth INTEGER,
- CONSTRAINT uniqueness UNIQUE (context)
-);
-
-CREATE INDEX IF NOT EXISTS context_depth_context ON context_depth(context);
-
-
-CREATE INDEX IF NOT EXISTS pdu_id ON pdus(pdu_id, origin);
-
-CREATE INDEX IF NOT EXISTS dests_id ON pdu_destinations (pdu_id, origin);
--- CREATE INDEX IF NOT EXISTS dests ON pdu_destinations (destination);
-
-CREATE INDEX IF NOT EXISTS pdu_extrem_context ON pdu_forward_extremities(context);
-CREATE INDEX IF NOT EXISTS pdu_extrem_id ON pdu_forward_extremities(pdu_id, origin);
-
-CREATE INDEX IF NOT EXISTS pdu_edges_id ON pdu_edges(pdu_id, origin);
-
-CREATE INDEX IF NOT EXISTS pdu_b_extrem_context ON pdu_backward_extremities(context);
diff --git a/synapse/storage/schema/signatures.sql b/synapse/storage/schema/signatures.sql
deleted file mode 100644
index 1c45a51bec..0000000000
--- a/synapse/storage/schema/signatures.sql
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Copyright 2014 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-CREATE TABLE IF NOT EXISTS pdu_content_hashes (
- pdu_id TEXT,
- origin TEXT,
- algorithm TEXT,
- hash BLOB,
- CONSTRAINT uniqueness UNIQUE (pdu_id, origin, algorithm)
-);
-
-CREATE INDEX IF NOT EXISTS pdu_content_hashes_id ON pdu_content_hashes (
- pdu_id, origin
-);
-
-CREATE TABLE IF NOT EXISTS pdu_reference_hashes (
- pdu_id TEXT,
- origin TEXT,
- algorithm TEXT,
- hash BLOB,
- CONSTRAINT uniqueness UNIQUE (pdu_id, origin, algorithm)
-);
-
-CREATE INDEX IF NOT EXISTS pdu_reference_hashes_id ON pdu_reference_hashes (
- pdu_id, origin
-);
-
-CREATE TABLE IF NOT EXISTS pdu_origin_signatures (
- pdu_id TEXT,
- origin TEXT,
- key_id TEXT,
- signature BLOB,
- CONSTRAINT uniqueness UNIQUE (pdu_id, origin, key_id)
-);
-
-CREATE INDEX IF NOT EXISTS pdu_origin_signatures_id ON pdu_origin_signatures (
- pdu_id, origin
-);
-
-CREATE TABLE IF NOT EXISTS pdu_edge_hashes(
- pdu_id TEXT,
- origin TEXT,
- prev_pdu_id TEXT,
- prev_origin TEXT,
- algorithm TEXT,
- hash BLOB,
- CONSTRAINT uniqueness UNIQUE (
- pdu_id, origin, prev_pdu_id, prev_origin, algorithm
- )
-);
-
-CREATE INDEX IF NOT EXISTS pdu_edge_hashes_id ON pdu_edge_hashes(
- pdu_id, origin
-);
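The replacement tables in event_signatures.sql drop the origin column from every key except event_origin_signatures: the old (pdu_id, origin) compound key collapses into the single event_id. A hedged illustration of why one column is enough, assuming the $id:origin event-id format (an assumption here, not something this diff itself shows):

    # Hypothetical identifiers, for illustration only.
    pdu_id, origin = "abc123", "example.com"

    # An event id already carries its origin, so one TEXT column can
    # stand in for the old two-column compound key (assumed format).
    event_id = "$%s:%s" % (pdu_id, origin)

    assert event_id == "$abc123:example.com"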
diff --git a/synapse/storage/schema/state.sql b/synapse/storage/schema/state.sql
new file mode 100644
index 0000000000..b44c56b519
--- /dev/null
+++ b/synapse/storage/schema/state.sql
@@ -0,0 +1,33 @@
+/* Copyright 2014 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS state_groups(
+ id INTEGER PRIMARY KEY,
+ room_id TEXT NOT NULL,
+ event_id TEXT NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS state_groups_state(
+ state_group INTEGER NOT NULL,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL,
+ event_id TEXT NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS event_to_state_groups(
+ event_id TEXT NOT NULL,
+ state_group INTEGER NOT NULL
+);
\ No newline at end of file
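These three tables implement state de-duplication: each distinct state snapshot gets one row in state_groups, its full (type, state_key) -> event map lives in state_groups_state, and event_to_state_groups lets many events share a single snapshot. A sqlite3 sketch of that sharing (room and event ids invented):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    with open("synapse/storage/schema/state.sql") as f:
        conn.executescript(f.read())

    conn.execute(
        "INSERT INTO state_groups (id, room_id, event_id) VALUES (1, ?, ?)",
        ("!room:example.com", "$create:example.com"),
    )
    conn.execute(
        "INSERT INTO state_groups_state"
        " (state_group, room_id, type, state_key, event_id)"
        " VALUES (1, ?, 'm.room.create', '', ?)",
        ("!room:example.com", "$create:example.com"),
    )
    # Two later messages whose state is unchanged can both point at
    # group 1 instead of duplicating every state row.
    conn.executemany(
        "INSERT INTO event_to_state_groups (event_id, state_group)"
        " VALUES (?, 1)",
        [("$msg1:example.com",), ("$msg2:example.com",)],
    )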
diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py
index 82be946d3f..b4b3d5d7ea 100644
--- a/synapse/storage/signatures.py
+++ b/synapse/storage/signatures.py
@@ -17,139 +17,149 @@ from _base import SQLBaseStore
class SignatureStore(SQLBaseStore):
- """Persistence for PDU signatures and hashes"""
+ """Persistence for event signatures and hashes"""
- def _get_pdu_content_hashes_txn(self, txn, pdu_id, origin):
- """Get all the hashes for a given PDU.
+ def _get_event_content_hashes_txn(self, txn, event_id):
+ """Get all the hashes for a given Event.
Args:
txn (cursor):
- pdu_id (str): Id for the PDU.
- origin (str): origin of the PDU.
+ event_id (str): Id for the Event.
Returns:
A dict of algorithm -> hash.
"""
query = (
"SELECT algorithm, hash"
- " FROM pdu_content_hashes"
- " WHERE pdu_id = ? and origin = ?"
+ " FROM event_content_hashes"
+ " WHERE event_id = ?"
)
- txn.execute(query, (pdu_id, origin))
+ txn.execute(query, (event_id, ))
return dict(txn.fetchall())
- def _store_pdu_content_hash_txn(self, txn, pdu_id, origin, algorithm,
+ def _store_event_content_hash_txn(self, txn, event_id, algorithm,
hash_bytes):
- """Store a hash for a PDU
+ """Store a hash for a Event
Args:
txn (cursor):
- pdu_id (str): Id for the PDU.
- origin (str): origin of the PDU.
+ event_id (str): Id for the Event.
algorithm (str): Hashing algorithm.
hash_bytes (bytes): Hash function output bytes.
"""
- self._simple_insert_txn(txn, "pdu_content_hashes", {
- "pdu_id": pdu_id,
- "origin": origin,
- "algorithm": algorithm,
- "hash": buffer(hash_bytes),
- })
+ self._simple_insert_txn(
+ txn,
+ "event_content_hashes",
+ {
+ "event_id": event_id,
+ "algorithm": algorithm,
+ "hash": buffer(hash_bytes),
+ },
+ or_ignore=True,
+ )
- def _get_pdu_reference_hashes_txn(self, txn, pdu_id, origin):
+ def _get_event_reference_hashes_txn(self, txn, event_id):
"""Get all the hashes for a given PDU.
Args:
txn (cursor):
- pdu_id (str): Id for the PDU.
- origin (str): origin of the PDU.
+ event_id (str): Id for the Event.
Returns:
A dict of algorithm -> hash.
"""
query = (
"SELECT algorithm, hash"
- " FROM pdu_reference_hashes"
- " WHERE pdu_id = ? and origin = ?"
+ " FROM event_reference_hashes"
+ " WHERE event_id = ?"
)
- txn.execute(query, (pdu_id, origin))
+ txn.execute(query, (event_id, ))
return dict(txn.fetchall())
- def _store_pdu_reference_hash_txn(self, txn, pdu_id, origin, algorithm,
+ def _store_event_reference_hash_txn(self, txn, event_id, algorithm,
hash_bytes):
"""Store a hash for a PDU
Args:
txn (cursor):
- pdu_id (str): Id for the PDU.
- origin (str): origin of the PDU.
+ event_id (str): Id for the Event.
algorithm (str): Hashing algorithm.
hash_bytes (bytes): Hash function output bytes.
"""
- self._simple_insert_txn(txn, "pdu_reference_hashes", {
- "pdu_id": pdu_id,
- "origin": origin,
- "algorithm": algorithm,
- "hash": buffer(hash_bytes),
- })
+ self._simple_insert_txn(
+ txn,
+ "event_reference_hashes",
+ {
+ "event_id": event_id,
+ "algorithm": algorithm,
+ "hash": buffer(hash_bytes),
+ },
+ or_ignore=True,
+ )
- def _get_pdu_origin_signatures_txn(self, txn, pdu_id, origin):
+ def _get_event_origin_signatures_txn(self, txn, event_id):
"""Get all the signatures for a given PDU.
Args:
txn (cursor):
- pdu_id (str): Id for the PDU.
- origin (str): origin of the PDU.
+ event_id (str): Id for the Event.
Returns:
A dict of key_id -> signature_bytes.
"""
query = (
"SELECT key_id, signature"
- " FROM pdu_origin_signatures"
- " WHERE pdu_id = ? and origin = ?"
+ " FROM event_origin_signatures"
+ " WHERE event_id = ? "
)
- txn.execute(query, (pdu_id, origin))
+ txn.execute(query, (event_id, ))
return dict(txn.fetchall())
- def _store_pdu_origin_signature_txn(self, txn, pdu_id, origin, key_id,
- signature_bytes):
+ def _store_event_origin_signature_txn(self, txn, event_id, origin, key_id,
+ signature_bytes):
"""Store a signature from the origin server for a PDU.
Args:
txn (cursor):
- pdu_id (str): Id for the PDU.
- origin (str): origin of the PDU.
+ event_id (str): Id for the Event.
+ origin (str): origin of the Event.
key_id (str): Id for the signing key.
signature_bytes (bytes): The signature.
"""
- self._simple_insert_txn(txn, "pdu_origin_signatures", {
- "pdu_id": pdu_id,
- "origin": origin,
- "key_id": key_id,
- "signature": buffer(signature_bytes),
- })
+ self._simple_insert_txn(
+ txn,
+ "event_origin_signatures",
+ {
+ "event_id": event_id,
+ "origin": origin,
+ "key_id": key_id,
+ "signature": buffer(signature_bytes),
+ },
+ or_ignore=True,
+ )
- def _get_prev_pdu_hashes_txn(self, txn, pdu_id, origin):
+ def _get_prev_event_hashes_txn(self, txn, event_id):
"""Get all the hashes for previous PDUs of a PDU
Args:
txn (cursor):
- pdu_id (str): Id of the PDU.
- origin (str): Origin of the PDU.
+ event_id (str): Id for the Event.
Returns:
dict of prev_event_id -> dict of algorithm -> hash_bytes.
"""
query = (
- "SELECT prev_pdu_id, prev_origin, algorithm, hash"
- " FROM pdu_edge_hashes"
- " WHERE pdu_id = ? and origin = ?"
+ "SELECT prev_event_id, algorithm, hash"
+ " FROM event_edge_hashes"
+ " WHERE event_id = ?"
)
- txn.execute(query, (pdu_id, origin))
+ txn.execute(query, (event_id, ))
results = {}
- for prev_pdu_id, prev_origin, algorithm, hash_bytes in txn.fetchall():
- hashes = results.setdefault((prev_pdu_id, prev_origin), {})
+ for prev_event_id, algorithm, hash_bytes in txn.fetchall():
+ hashes = results.setdefault(prev_event_id, {})
hashes[algorithm] = hash_bytes
return results
- def _store_prev_pdu_hash_txn(self, txn, pdu_id, origin, prev_pdu_id,
- prev_origin, algorithm, hash_bytes):
- self._simple_insert_txn(txn, "pdu_edge_hashes", {
- "pdu_id": pdu_id,
- "origin": origin,
- "prev_pdu_id": prev_pdu_id,
- "prev_origin": prev_origin,
- "algorithm": algorithm,
- "hash": buffer(hash_bytes),
- })
+ def _store_prev_event_hash_txn(self, txn, event_id, prev_event_id,
+ algorithm, hash_bytes):
+ self._simple_insert_txn(
+ txn,
+ "event_edge_hashes",
+ {
+ "event_id": event_id,
+ "prev_event_id": prev_event_id,
+ "algorithm": algorithm,
+ "hash": buffer(hash_bytes),
+ },
+ or_ignore=True,
+ )
\ No newline at end of file
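The result shape of _get_prev_event_hashes_txn changes with the key: the outer dict is now keyed on the bare prev_event_id instead of a (prev_pdu_id, prev_origin) pair. A standalone illustration of the setdefault accumulation (rows invented):

    # Rows as returned by the SELECT in _get_prev_event_hashes_txn.
    rows = [
        ("$prev1:a.example.com", "sha256", b"\x01"),
        ("$prev1:a.example.com", "sha512", b"\x02"),
        ("$prev2:b.example.com", "sha256", b"\x03"),
    ]

    results = {}
    for prev_event_id, algorithm, hash_bytes in rows:
        # One inner dict per previous event, mapping algorithm -> hash.
        results.setdefault(prev_event_id, {})[algorithm] = hash_bytes

    assert results["$prev1:a.example.com"] == {
        "sha256": b"\x01",
        "sha512": b"\x02",
    }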
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
new file mode 100644
index 0000000000..e08acd6404
--- /dev/null
+++ b/synapse/storage/state.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from twisted.internet import defer
+
+from collections import namedtuple
+
+
+StateGroup = namedtuple("StateGroup", ("group", "state"))
+
+
+class StateStore(SQLBaseStore):
+
+ @defer.inlineCallbacks
+ def get_state_groups(self, event_ids):
+ groups = set()
+ for event_id in event_ids:
+ group = yield self._simple_select_one_onecol(
+ table="event_to_state_groups",
+ keyvalues={"event_id": event_id},
+ retcol="state_group",
+ allow_none=True,
+ )
+ if group:
+ groups.add(group)
+
+ res = []
+ for group in groups:
+ state_ids = yield self._simple_select_onecol(
+ table="state_groups_state",
+ keyvalues={"state_group": group},
+ retcol="event_id",
+ )
+ state = []
+ for state_id in state_ids:
+ s = yield self.get_event(
+ state_id,
+ allow_none=True,
+ )
+ if s:
+ state.append(s)
+
+ res.append(StateGroup(group, state))
+
+ defer.returnValue(res)
+
+ def store_state_groups(self, event):
+ return self.runInteraction(
+ "store_state_groups",
+ self._store_state_groups_txn, event
+ )
+
+ def _store_state_groups_txn(self, txn, event):
+ if not event.state_events:
+ return
+
+ state_group = event.state_group
+ if not state_group:
+ state_group = self._simple_insert_txn(
+ txn,
+ table="state_groups",
+ values={
+ "room_id": event.room_id,
+ "event_id": event.event_id,
+ }
+ )
+
+ for state in event.state_events.values():
+ self._simple_insert_txn(
+ txn,
+ table="state_groups_state",
+ values={
+ "state_group": state_group,
+ "room_id": state.room_id,
+ "type": state.type,
+ "state_key": state.state_key,
+ "event_id": state.event_id,
+ }
+ )
+
+ self._simple_insert_txn(
+ txn,
+ table="event_to_state_groups",
+ values={
+ "state_group": state_group,
+ "event_id": event.event_id,
+ }
+ )
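A hedged sketch of how the two StateStore entry points compose, assuming an event object exposing the event_id, state_events and state_group attributes used above (the wiring is invented for illustration, not taken from this patch):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def persist_and_read_state(store, event):
        # Write the snapshot (a no-op when event.state_events is empty),
        # then resolve every group reachable from this event.
        yield store.store_state_groups(event)
        groups = yield store.get_state_groups([event.event_id])
        for group, state in groups:
            print "group %d holds %d state events" % (group, len(state))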
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index d61f909939..8f7f61d29d 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -309,7 +309,10 @@ class StreamStore(SQLBaseStore):
defer.returnValue(ret)
def get_room_events_max_id(self):
- return self.runInteraction(self._get_room_events_max_id_txn)
+ return self.runInteraction(
+ "get_room_events_max_id",
+ self._get_room_events_max_id_txn
+ )
def _get_room_events_max_id_txn(self, txn):
txn.execute(
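Every runInteraction call site in this patch gains a leading name string, presumably so the base class can label the transaction in logs or metrics. A standalone sketch of the convention, making no claims about the real SQLBaseStore internals:

    def run_interaction(desc, func, *args, **kwargs):
        # 'desc' only names the unit of work; it is not passed to func.
        print "starting db txn: %s" % desc
        try:
            return func(*args, **kwargs)
        finally:
            print "finished db txn: %s" % desc

    run_interaction("get_room_events_max_id", max, [3, 1, 5])  # returns 5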
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index 2ba8e30efe..00d0f48082 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -14,7 +14,6 @@
# limitations under the License.
from ._base import SQLBaseStore, Table
-from .pdu import PdusTable
from collections import namedtuple
@@ -42,6 +41,7 @@ class TransactionStore(SQLBaseStore):
"""
return self.runInteraction(
+ "get_received_txn_response",
self._get_received_txn_response, transaction_id, origin
)
@@ -73,6 +73,7 @@ class TransactionStore(SQLBaseStore):
"""
return self.runInteraction(
+ "set_received_txn_response",
self._set_received_txn_response,
transaction_id, origin, code, response_dict
)
@@ -88,7 +89,7 @@ class TransactionStore(SQLBaseStore):
txn.execute(query, (code, response_json, transaction_id, origin))
def prep_send_transaction(self, transaction_id, destination,
- origin_server_ts, pdu_list):
+ origin_server_ts):
"""Persists an outgoing transaction and calculates the values for the
previous transaction id list.
@@ -99,19 +100,19 @@ class TransactionStore(SQLBaseStore):
transaction_id (str)
destination (str)
origin_server_ts (int)
- pdu_list (list)
Returns:
list: A list of previous transaction ids.
"""
return self.runInteraction(
+ "prep_send_transaction",
self._prep_send_transaction,
- transaction_id, destination, origin_server_ts, pdu_list
+ transaction_id, destination, origin_server_ts
)
def _prep_send_transaction(self, txn, transaction_id, destination,
- origin_server_ts, pdu_list):
+ origin_server_ts):
# First we find out what the prev_txs should be.
# Since we know that we are only sending one transaction at a time,
@@ -139,15 +140,15 @@ class TransactionStore(SQLBaseStore):
# Update the tx id -> pdu id mapping
- values = [
- (transaction_id, destination, pdu[0], pdu[1])
- for pdu in pdu_list
- ]
-
- logger.debug("Inserting: %s", repr(values))
-
- query = TransactionsToPduTable.insert_statement()
- txn.executemany(query, values)
+ # values = [
+ # (transaction_id, destination, pdu[0], pdu[1])
+ # for pdu in pdu_list
+ # ]
+ #
+ # logger.debug("Inserting: %s", repr(values))
+ #
+ # query = TransactionsToPduTable.insert_statement()
+ # txn.executemany(query, values)
return prev_txns
@@ -161,6 +162,7 @@ class TransactionStore(SQLBaseStore):
response_json (str)
"""
return self.runInteraction(
+ "delivered_txn",
self._delivered_txn,
transaction_id, destination, code, response_dict
)
@@ -186,6 +188,7 @@ class TransactionStore(SQLBaseStore):
list: A list of `ReceivedTransactionsTable.EntryType`
"""
return self.runInteraction(
+ "get_transactions_after",
self._get_transactions_after, transaction_id, destination
)
@@ -202,49 +205,6 @@ class TransactionStore(SQLBaseStore):
return ReceivedTransactionsTable.decode_results(txn.fetchall())
- def get_pdus_after_transaction(self, transaction_id, destination):
- """For a given local transaction_id that we sent to a given destination
- home server, return a list of PDUs that were sent to that destination
- after it.
-
- Args:
- txn
- transaction_id (str)
- destination (str)
-
- Returns
- list: A list of PduTuple
- """
- return self.runInteraction(
- self._get_pdus_after_transaction,
- transaction_id, destination
- )
-
- def _get_pdus_after_transaction(self, txn, transaction_id, destination):
-
- # Query that first get's all transaction_ids with an id greater than
- # the one given from the `sent_transactions` table. Then JOIN on this
- # from the `tx->pdu` table to get a list of (pdu_id, origin) that
- # specify the pdus that were sent in those transactions.
- query = (
- "SELECT pdu_id, pdu_origin FROM %(tx_pdu)s as tp "
- "INNER JOIN %(sent_tx)s as st "
- "ON tp.transaction_id = st.transaction_id "
- "AND tp.destination = st.destination "
- "WHERE st.id > ("
- "SELECT id FROM %(sent_tx)s "
- "WHERE transaction_id = ? AND destination = ?"
- ) % {
- "tx_pdu": TransactionsToPduTable.table_name,
- "sent_tx": SentTransactions.table_name,
- }
-
- txn.execute(query, (transaction_id, destination))
-
- pdus = PdusTable.decode_results(txn.fetchall())
-
- return self._get_pdu_tuples(txn, pdus)
-
class ReceivedTransactionsTable(Table):
table_name = "received_transactions"
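Finally, prep_send_transaction loses its pdu_list argument (the tx-id -> pdu-id bookkeeping is commented out above rather than deleted), so callers shrink accordingly. A hedged caller sketch, with the store and transaction objects assumed rather than taken from this diff:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def prepare_outgoing(store, transaction, destination):
        # pdu_list no longer travels with the call; only the list of
        # previous transaction ids comes back.
        prev_txns = yield store.prep_send_transaction(
            transaction.transaction_id,
            destination,
            transaction.origin_server_ts,
        )
        defer.returnValue(prev_txns)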