diff --git a/synapse/storage/schema/im.sql b/synapse/storage/schema/im.sql
index 0fb3dbee55..ea04261ff0 100644
--- a/synapse/storage/schema/im.sql
+++ b/synapse/storage/schema/im.sql
@@ -14,7 +14,7 @@
*/
CREATE TABLE IF NOT EXISTS events(
- token_ordering INTEGER PRIMARY KEY AUTOINCREMENT,
+ stream_ordering INTEGER PRIMARY KEY AUTOINCREMENT,
topological_ordering INTEGER NOT NULL,
event_id TEXT NOT NULL,
type TEXT NOT NULL,
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index c03c983e14..87fc978813 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -16,8 +16,9 @@
from twisted.internet import defer
from ._base import SQLBaseStore
-
+from synapse.api.errors import SynapseError
from synapse.api.constants import Membership
+from synapse.util.logutils import log_function
import json
import logging
@@ -29,9 +30,96 @@ logger = logging.getLogger(__name__)
MAX_STREAM_SIZE = 1000
+_STREAM_TOKEN = "stream"
+_TOPOLOGICAL_TOKEN = "topological"
+
+
+def _parse_stream_token(string):
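+    # Stream tokens have the form "s<stream_ordering>", e.g. "s42".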
+    try:
+        if string[0] != 's':
+            raise ValueError("Stream tokens must start with 's'")
+        return int(string[1:])
+    except Exception:
+        logger.debug("Not stream token: %s", string)
+        raise SynapseError(400, "Invalid token")
+
+
+def _parse_topological_token(string):
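+    # Topological tokens have the form "t<topological_ordering>-<stream_ordering>", e.g. "t5-42".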
+    try:
+        if string[0] != 't':
+            raise ValueError("Topological tokens must start with 't'")
+        parts = string[1:].split('-', 1)
+        return (int(parts[0]), int(parts[1]))
+    except Exception:
+        logger.debug("Not topological token: %s", string)
+        raise SynapseError(400, "Invalid token")
+
+
+def is_stream_token(string):
+    try:
+        _parse_stream_token(string)
+        return True
+    except SynapseError:
+        return False
+
+
+def is_topological_token(string):
+    try:
+        _parse_topological_token(string)
+        return True
+    except SynapseError:
+        return False
+
+
+def _get_token_bound(token, comparison):
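+    # Turn a token into a SQL fragment that bounds the relevant ordering
+    # column(s) with the given comparison operator ('<' or '>').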
+    try:
+        s = _parse_stream_token(token)
+        return "%s %s %d" % ("stream_ordering", comparison, s)
+    except SynapseError:
+        pass
+
+    try:
+        top, stream = _parse_topological_token(token)
+        return "%s %s %d AND %s %s %d" % (
+            "topological_ordering", comparison, top,
+            "stream_ordering", comparison, stream,
+        )
+    except SynapseError:
+        pass
+
+    raise SynapseError(400, "Invalid token")
+
+
class StreamStore(SQLBaseStore):
+ @log_function
+ def get_room_events(self, user_id, from_key, to_key, room_id, limit=0,
+ direction='f', with_feedback=False):
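+        # Forward requests between two stream tokens are live stream reads;
+        # anything else falls back to pagination.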
+ is_events = (
+ direction == 'f'
+ and is_stream_token(from_key)
+ and to_key and is_stream_token(to_key)
+ )
+
+ if is_events:
+ return self.get_room_events_stream(
+ user_id=user_id,
+ from_key=from_key,
+ to_key=to_key,
+ room_id=room_id,
+ limit=limit,
+ with_feedback=with_feedback,
+ )
+ else:
+ return self.paginate_room_events(
+ from_key=from_key,
+ to_key=to_key,
+ room_id=room_id,
+ limit=limit,
+ with_feedback=with_feedback,
+            )
+
@defer.inlineCallbacks
+ @log_function
def get_room_events_stream(self, user_id, from_key, to_key, room_id,
limit=0, with_feedback=False):
# TODO (erikj): Handle compressed feedback
@@ -54,8 +142,8 @@ class StreamStore(SQLBaseStore):
limit = MAX_STREAM_SIZE
# From and to keys should be integers from ordering.
- from_key = int(from_key)
- to_key = int(to_key)
+ from_id = _parse_stream_token(from_key)
+ to_id = _parse_stream_token(to_key)
if from_key == to_key:
defer.returnValue(([], to_key))
@@ -65,42 +153,79 @@ class StreamStore(SQLBaseStore):
"SELECT * FROM events as e WHERE "
"((room_id IN (%(current)s)) OR "
"(event_id IN (%(invites)s))) "
+ "AND e.stream_ordering > ? AND e.stream_ordering < ? "
+ "ORDER BY stream_ordering ASC LIMIT %(limit)d "
) % {
"current": current_room_membership_sql,
"invites": invites_sql,
+ "limit": limit
}
- # Constraints and ordering depend on direction.
- if from_key < to_key:
- sql += (
- "AND e.token_ordering > ? AND e.token_ordering < ? "
- "ORDER BY token_ordering ASC LIMIT %(limit)d "
- ) % {"limit": limit}
- else:
- sql += (
- "AND e.token_ordering < ? "
- "AND e.token_ordering > ? "
- "ORDER BY e.token_ordering DESC LIMIT %(limit)d "
- ) % {"limit": int(limit)}
-
rows = yield self._execute_and_decode(
sql,
- user_id, user_id, Membership.INVITE, from_key, to_key
+ user_id, user_id, Membership.INVITE, from_id, to_id
)
ret = [self._parse_event_from_row(r) for r in rows]
if rows:
- if from_key < to_key:
- key = max([r["token_ordering"] for r in rows])
- else:
- key = min([r["token_ordering"] for r in rows])
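+            # The stream only moves forwards, so advance to the highest
+            # stream_ordering returned.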
+ key = "s%d" % max([r["stream_ordering"] for r in rows])
else:
+ # Assume we didn't get anything because there was nothing to get.
key = to_key
defer.returnValue((ret, key))
@defer.inlineCallbacks
+ @log_function
+ def paginate_room_events(self, room_id, from_key, to_key=None,
+ direction='b', limit=-1,
+ with_feedback=False):
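+        # direction 'b' pages backwards (newest first), 'f' pages forwards.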
+ # TODO (erikj): Handle compressed feedback
+
+        from_comp = '<' if direction == 'b' else '>'
+        to_comp = '>' if direction == 'b' else '<'
+        order = "DESC" if direction == 'b' else "ASC"
+
+ args = [room_id]
+
+ bounds = _get_token_bound(from_key, from_comp)
+ if to_key:
+ bounds = "%s AND %s" % (bounds, _get_token_bound(to_key, to_comp))
+
+ if int(limit) > 0:
+ args.append(int(limit))
+ limit_str = " LIMIT ?"
+ else:
+ limit_str = ""
+
+ sql = (
+ "SELECT * FROM events "
+ "WHERE room_id = ? AND %(bounds)s "
+            "ORDER BY topological_ordering %(order)s, "
+            "stream_ordering %(order)s %(limit)s "
+ ) % {"bounds": bounds, "order": order, "limit": limit_str}
+
+ rows = yield self._execute_and_decode(
+ sql,
+ *args
+ )
+
+ if rows:
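+            # Build the next pagination token from the last row returned.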
+ topo = rows[-1]["topological_ordering"]
+ toke = rows[-1]["stream_ordering"]
+ next_token = "t%s-%s" % (topo, toke)
+ else:
+ # TODO (erikj): We should work out what to do here instead.
+ next_token = to_key if to_key else from_key
+
+ defer.returnValue(
+ (
+ [self._parse_event_from_row(r) for r in rows],
+ next_token
+ )
+ )
+
+ @defer.inlineCallbacks
def get_recent_events_for_room(self, room_id, limit, with_feedback=False):
# TODO (erikj): Handle compressed feedback
@@ -108,8 +233,8 @@ class StreamStore(SQLBaseStore):
sql = (
"SELECT * FROM events "
- "WHERE room_id = ? AND token_ordering <= ? "
- "ORDER BY topological_ordering, token_ordering DESC LIMIT ? "
+ "WHERE room_id = ? AND stream_ordering <= ? "
+ "ORDER BY topological_ordering, stream_ordering DESC LIMIT ? "
)
rows = yield self._execute_and_decode(
@@ -121,12 +246,12 @@ class StreamStore(SQLBaseStore):
if rows:
topo = rows[0]["topological_ordering"]
- toke = rows[0]["token_ordering"]
+ toke = rows[0]["stream_ordering"]
start_token = "p%s-%s" % (topo, toke)
token = (start_token, end_token)
else:
- token = ("START", end_token)
+ token = (end_token, end_token)
defer.returnValue(
(
@@ -138,11 +263,14 @@ class StreamStore(SQLBaseStore):
@defer.inlineCallbacks
def get_room_events_max_id(self):
res = yield self._execute_and_decode(
- "SELECT MAX(token_ordering) as m FROM events"
+ "SELECT MAX(stream_ordering) as m FROM events"
)
- if not res:
- defer.returnValue(0)
+ logger.debug("get_room_events_max_id: %s", res)
+
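+        # An empty events table still needs a usable token, so start the
+        # stream at "s1".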
+ if not res or not res[0] or not res[0]["m"]:
+ defer.returnValue("s1")
return
- defer.returnValue(res[0]["m"])
+ key = res[0]["m"] + 1
+ defer.returnValue("s%d" % (key,))
|