diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 5233430028..c4dc3b3d51 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -24,7 +24,7 @@ from synapse.util.logutils import log_function
from synapse.api.constants import EventTypes
from canonicaljson import encode_canonical_json
-from contextlib import contextmanager
+from collections import namedtuple
import logging
import math
@@ -60,64 +60,71 @@ class EventsStore(SQLBaseStore):
)
@defer.inlineCallbacks
- def persist_events(self, events_and_contexts, backfilled=False,
- is_new_state=True):
+ def persist_events(self, events_and_contexts, backfilled=False):
if not events_and_contexts:
return
if backfilled:
- start = self.min_stream_token - 1
- self.min_stream_token -= len(events_and_contexts) + 1
- stream_orderings = range(start, self.min_stream_token, -1)
-
- @contextmanager
- def stream_ordering_manager():
- yield stream_orderings
- stream_ordering_manager = stream_ordering_manager()
+ stream_ordering_manager = self._backfill_id_gen.get_next_mult(
+ len(events_and_contexts)
+ )
else:
stream_ordering_manager = self._stream_id_gen.get_next_mult(
len(events_and_contexts)
)
+ state_group_id_manager = self._state_groups_id_gen.get_next_mult(
+ len(events_and_contexts)
+ )
with stream_ordering_manager as stream_orderings:
- for (event, _), stream in zip(events_and_contexts, stream_orderings):
- event.internal_metadata.stream_ordering = stream
-
- chunks = [
- events_and_contexts[x:x + 100]
- for x in xrange(0, len(events_and_contexts), 100)
- ]
+ with state_group_id_manager as state_group_ids:
+ for (event, context), stream, state_group_id in zip(
+ events_and_contexts, stream_orderings, state_group_ids
+ ):
+ event.internal_metadata.stream_ordering = stream
+ # Assign a state group_id in case a new id is needed for
+ # this context. In theory we only need to assign this
+ # for contexts that have current_state and aren't outliers
+ # but that makes the code more complicated. Assigning an ID
+ # per event only causes the state_group_ids to grow as fast
+ # as the stream_ordering so in practice shouldn't be a problem.
+ context.new_state_group_id = state_group_id
+
+ chunks = [
+ events_and_contexts[x:x + 100]
+ for x in xrange(0, len(events_and_contexts), 100)
+ ]
- for chunk in chunks:
- # We can't easily parallelize these since different chunks
- # might contain the same event. :(
- yield self.runInteraction(
- "persist_events",
- self._persist_events_txn,
- events_and_contexts=chunk,
- backfilled=backfilled,
- is_new_state=is_new_state,
- )
+ for chunk in chunks:
+ # We can't easily parallelize these since different chunks
+ # might contain the same event. :(
+ yield self.runInteraction(
+ "persist_events",
+ self._persist_events_txn,
+ events_and_contexts=chunk,
+ backfilled=backfilled,
+ )
@defer.inlineCallbacks
@log_function
- def persist_event(self, event, context,
- is_new_state=True, current_state=None):
+ def persist_event(self, event, context, current_state=None):
+
try:
with self._stream_id_gen.get_next() as stream_ordering:
- event.internal_metadata.stream_ordering = stream_ordering
- yield self.runInteraction(
- "persist_event",
- self._persist_event_txn,
- event=event,
- context=context,
- is_new_state=is_new_state,
- current_state=current_state,
- )
+ with self._state_groups_id_gen.get_next() as state_group_id:
+ event.internal_metadata.stream_ordering = stream_ordering
+ context.new_state_group_id = state_group_id
+ yield self.runInteraction(
+ "persist_event",
+ self._persist_event_txn,
+ event=event,
+ context=context,
+ current_state=current_state,
+ )
except _RollbackButIsFineException:
pass
- max_persisted_id = yield self._stream_id_gen.get_max_token()
+ max_persisted_id = yield self._stream_id_gen.get_current_token()
defer.returnValue((stream_ordering, max_persisted_id))
@defer.inlineCallbacks
@@ -177,8 +184,7 @@ class EventsStore(SQLBaseStore):
defer.returnValue({e.event_id: e for e in events})
@log_function
- def _persist_event_txn(self, txn, event, context,
- is_new_state=True, current_state=None):
+ def _persist_event_txn(self, txn, event, context, current_state):
# We purposefully do this first since if we include a `current_state`
# key, we *want* to update the `current_state_events` table
if current_state:
@@ -186,7 +192,16 @@ class EventsStore(SQLBaseStore):
txn.call_after(self.get_rooms_for_user.invalidate_all)
txn.call_after(self.get_users_in_room.invalidate, (event.room_id,))
txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,))
- txn.call_after(self.get_room_name_and_aliases, event.room_id)
+ txn.call_after(self.get_room_name_and_aliases.invalidate, (event.room_id,))
+
+ # Add an entry to the current_state_resets table to record the point
+ # where we clobbered the current state
+ stream_order = event.internal_metadata.stream_ordering
+ self._simple_insert_txn(
+ txn,
+ table="current_state_resets",
+ values={"event_stream_ordering": stream_order}
+ )
self._simple_delete_txn(
txn,
@@ -210,12 +225,10 @@ class EventsStore(SQLBaseStore):
txn,
[(event, context)],
backfilled=False,
- is_new_state=is_new_state,
)
@log_function
- def _persist_events_txn(self, txn, events_and_contexts, backfilled,
- is_new_state=True):
+ def _persist_events_txn(self, txn, events_and_contexts, backfilled):
depth_updates = {}
for event, context in events_and_contexts:
# Remove the any existing cache entries for the event_ids
@@ -282,9 +295,7 @@ class EventsStore(SQLBaseStore):
outlier_persisted = have_persisted[event.event_id]
if not event.internal_metadata.is_outlier() and outlier_persisted:
- self._store_state_groups_txn(
- txn, event, context,
- )
+ self._store_mult_state_groups_txn(txn, ((event, context),))
metadata_json = encode_json(
event.internal_metadata.get_dict()
@@ -299,6 +310,18 @@ class EventsStore(SQLBaseStore):
(metadata_json, event.event_id,)
)
+ stream_order = event.internal_metadata.stream_ordering
+ state_group_id = context.state_group or context.new_state_group_id
+ self._simple_insert_txn(
+ txn,
+ table="ex_outlier_stream",
+ values={
+ "event_stream_ordering": stream_order,
+ "event_id": event.event_id,
+ "state_group": state_group_id,
+ }
+ )
+
sql = (
"UPDATE events SET outlier = ?"
" WHERE event_id = ?"
@@ -310,19 +333,14 @@ class EventsStore(SQLBaseStore):
self._update_extremeties(txn, [event])
- events_and_contexts = filter(
- lambda ec: ec[0] not in to_remove,
- events_and_contexts
- )
+ events_and_contexts = [
+ ec for ec in events_and_contexts if ec[0] not in to_remove
+ ]
if not events_and_contexts:
return
- self._store_mult_state_groups_txn(txn, [
- (event, context)
- for event, context in events_and_contexts
- if not event.internal_metadata.is_outlier()
- ])
+ self._store_mult_state_groups_txn(txn, events_and_contexts)
self._handle_mult_prev_events(
txn,
@@ -421,10 +439,9 @@ class EventsStore(SQLBaseStore):
txn, [event for event, _ in events_and_contexts]
)
- state_events_and_contexts = filter(
- lambda i: i[0].is_state(),
- events_and_contexts,
- )
+ state_events_and_contexts = [
+ ec for ec in events_and_contexts if ec[0].is_state()
+ ]
state_values = []
for event, context in state_events_and_contexts:
@@ -462,32 +479,50 @@ class EventsStore(SQLBaseStore):
],
)
- if is_new_state:
- for event, _ in state_events_and_contexts:
- if not context.rejected:
- txn.call_after(
- self._get_current_state_for_key.invalidate,
- (event.room_id, event.type, event.state_key,)
- )
+ if backfilled:
+ # Backfilled events come before the current state so we don't need
+ # to update the current state table
+ return
- if event.type in [EventTypes.Name, EventTypes.Aliases]:
- txn.call_after(
- self.get_room_name_and_aliases.invalidate,
- (event.room_id,)
- )
-
- self._simple_upsert_txn(
- txn,
- "current_state_events",
- keyvalues={
- "room_id": event.room_id,
- "type": event.type,
- "state_key": event.state_key,
- },
- values={
- "event_id": event.event_id,
- }
- )
+ for event, _ in state_events_and_contexts:
+ if (not event.internal_metadata.is_invite_from_remote()
+ and event.internal_metadata.is_outlier()):
+ # Outlier events generally shouldn't clobber the current state.
+ # However invites from remote servers for rooms we aren't in
+ # are a bit special: they don't come with any associated
+ # state so are technically an outlier, however all the
+ # client-facing code assumes that they are in the current
+ # state table so we insert the event anyway.
+ continue
+
+ if context.rejected:
+ # If the event failed its auth checks then it shouldn't
+ # clobber the current state.
+ continue
+
+ txn.call_after(
+ self._get_current_state_for_key.invalidate,
+ (event.room_id, event.type, event.state_key,)
+ )
+
+ if event.type in [EventTypes.Name, EventTypes.Aliases]:
+ txn.call_after(
+ self.get_room_name_and_aliases.invalidate,
+ (event.room_id,)
+ )
+
+ self._simple_upsert_txn(
+ txn,
+ "current_state_events",
+ keyvalues={
+ "room_id": event.room_id,
+ "type": event.type,
+ "state_key": event.state_key,
+ },
+ values={
+ "event_id": event.event_id,
+ }
+ )
return
@@ -1076,10 +1111,7 @@ class EventsStore(SQLBaseStore):
def get_current_backfill_token(self):
"""The current minimum token that backfilled events have reached"""
-
- # TODO: Fix race with the persit_event txn by using one of the
- # stream id managers
- return -self.min_stream_token
+ return -self._backfill_id_gen.get_current_token()
def get_all_new_events(self, last_backfill_id, last_forward_id,
current_backfill_id, current_forward_id, limit):
@@ -1087,10 +1119,12 @@ class EventsStore(SQLBaseStore):
new events or as backfilled events"""
def get_all_new_events_txn(txn):
sql = (
- "SELECT e.stream_ordering, ej.internal_metadata, ej.json"
+ "SELECT e.stream_ordering, ej.internal_metadata, ej.json, eg.state_group"
" FROM events as e"
" JOIN event_json as ej"
" ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
+ " LEFT JOIN event_to_state_groups as eg"
+ " ON e.event_id = eg.event_id"
" WHERE ? < e.stream_ordering AND e.stream_ordering <= ?"
" ORDER BY e.stream_ordering ASC"
" LIMIT ?"
@@ -1098,14 +1132,43 @@ class EventsStore(SQLBaseStore):
if last_forward_id != current_forward_id:
txn.execute(sql, (last_forward_id, current_forward_id, limit))
new_forward_events = txn.fetchall()
+
+ if len(new_forward_events) == limit:
+ upper_bound = new_forward_events[-1][0]
+ else:
+ upper_bound = current_forward_id
+
+ sql = (
+ "SELECT -event_stream_ordering FROM current_state_resets"
+ " WHERE ? < event_stream_ordering"
+ " AND event_stream_ordering <= ?"
+ " ORDER BY event_stream_ordering ASC"
+ )
+ txn.execute(sql, (last_forward_id, upper_bound))
+ state_resets = txn.fetchall()
+
+ sql = (
+ "SELECT -event_stream_ordering, event_id, state_group"
+ " FROM ex_outlier_stream"
+ " WHERE ? > event_stream_ordering"
+ " AND event_stream_ordering >= ?"
+ " ORDER BY event_stream_ordering DESC"
+ )
+ txn.execute(sql, (last_forward_id, upper_bound))
+ forward_ex_outliers = txn.fetchall()
else:
new_forward_events = []
+ state_resets = []
+ forward_ex_outliers = []
sql = (
- "SELECT -e.stream_ordering, ej.internal_metadata, ej.json"
+ "SELECT -e.stream_ordering, ej.internal_metadata, ej.json,"
+ " eg.state_group"
" FROM events as e"
" JOIN event_json as ej"
" ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
+ " LEFT JOIN event_to_state_groups as eg"
+ " ON e.event_id = eg.event_id"
" WHERE ? > e.stream_ordering AND e.stream_ordering >= ?"
" ORDER BY e.stream_ordering DESC"
" LIMIT ?"
@@ -1113,8 +1176,35 @@ class EventsStore(SQLBaseStore):
if last_backfill_id != current_backfill_id:
txn.execute(sql, (-last_backfill_id, -current_backfill_id, limit))
new_backfill_events = txn.fetchall()
+
+ if len(new_backfill_events) == limit:
+ upper_bound = new_backfill_events[-1][0]
+ else:
+ upper_bound = current_backfill_id
+
+ sql = (
+ "SELECT -event_stream_ordering, event_id, state_group"
+ " FROM ex_outlier_stream"
+ " WHERE ? > event_stream_ordering"
+ " AND event_stream_ordering >= ?"
+ " ORDER BY event_stream_ordering DESC"
+ )
+ txn.execute(sql, (-last_backfill_id, -upper_bound))
+ backward_ex_outliers = txn.fetchall()
else:
new_backfill_events = []
+ backward_ex_outliers = []
- return (new_forward_events, new_backfill_events)
+ return AllNewEventsResult(
+ new_forward_events, new_backfill_events,
+ forward_ex_outliers, backward_ex_outliers,
+ state_resets,
+ )
return self.runInteraction("get_all_new_events", get_all_new_events_txn)
+
+
+AllNewEventsResult = namedtuple("AllNewEventsResult", [
+ "new_forward_events", "new_backfill_events",
+ "forward_ex_outliers", "backward_ex_outliers",
+ "state_resets"
+])
|