diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index a61e83d5de..9996f195a0 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -113,6 +113,9 @@ class DataStore(RoomMemberStore, RoomStore,
self._device_inbox_id_gen = StreamIdGenerator(
db_conn, "device_max_stream_id", "stream_id"
)
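+ # Stream id generator for the public room list stream, backed by the
+ # new public_room_list_stream table (added in the schema deltas below).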
+ self._public_room_id_gen = StreamIdGenerator(
+ db_conn, "public_room_list_stream", "stream_id"
+ )
self._transaction_id_gen = IdGenerator(db_conn, "sent_transactions", "id")
self._state_groups_id_gen = IdGenerator(db_conn, "state_groups", "id")
@@ -219,6 +222,8 @@ class DataStore(RoomMemberStore, RoomStore,
self._find_stream_orderings_for_times, 60 * 60 * 1000
)
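+ # Remember the maximum stream ordering at startup so that we never try
+ # to use a cached position from before this process started (see
+ # get_forward_extremeties_for_room).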
+ self._stream_order_on_start = self.get_room_max_stream_ordering()
+
super(DataStore, self).__init__(hs)
def take_presence_startup_info(self):
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index a854a87eab..3d5994a580 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -37,7 +37,7 @@ class ApplicationServiceStore(SQLBaseStore):
)
def get_app_services(self):
- return defer.succeed(self.services_cache)
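+ # The services cache is populated from config at startup, so this can
+ # simply return the cached list rather than a deferred.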
+ return self.services_cache
def get_app_service_by_user_id(self, user_id):
"""Retrieve an application service from their user ID.
@@ -54,8 +54,8 @@ class ApplicationServiceStore(SQLBaseStore):
"""
for service in self.services_cache:
if service.sender == user_id:
- return defer.succeed(service)
- return defer.succeed(None)
+ return service
+ return None
def get_app_service_by_token(self, token):
"""Get the application service with the given appservice token.
@@ -67,8 +67,8 @@ class ApplicationServiceStore(SQLBaseStore):
"""
for service in self.services_cache:
if service.token == token:
- return defer.succeed(service)
- return defer.succeed(None)
+ return service
+ return None
def get_app_service_rooms(self, service):
"""Get a list of RoomsForUser for this application service.
@@ -163,7 +163,7 @@ class ApplicationServiceTransactionStore(SQLBaseStore):
["as_id"]
)
# NB: This assumes this class is linked with ApplicationServiceStore
- as_list = yield self.get_app_services()
+ as_list = self.get_app_services()
services = []
for res in results:
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index 0827946207..53feaa1960 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -16,6 +16,7 @@
from twisted.internet import defer
from ._base import SQLBaseStore
+from synapse.api.errors import StoreError
from synapse.util.caches.descriptors import cached
from unpaddedbase64 import encode_base64
@@ -36,6 +37,13 @@ class EventFederationStore(SQLBaseStore):
and backfilling from another server respectively.
"""
+ def __init__(self, hs):
+ super(EventFederationStore, self).__init__(hs)
+
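+ # Periodically prune old entries from stream_ordering_to_exterm so
+ # that the table doesn't grow without bound.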
+ hs.get_clock().looping_call(
+ self._delete_old_forward_extrem_cache, 60 * 60 * 1000
+ )
+
def get_auth_chain(self, event_ids):
return self.get_auth_chain_ids(event_ids).addCallback(self._get_events)
@@ -270,6 +278,37 @@ class EventFederationStore(SQLBaseStore):
]
)
+ # We now insert into stream_ordering_to_exterm a mapping from room_id,
+ # new stream_ordering to new forward extremities in the room.
+ # This allows us to later efficiently look up the forward extremities
+ # for a room before a given stream_ordering.
+ max_stream_ord = max(
+ ev.internal_metadata.stream_ordering for ev in events
+ )
+ new_extrem = {}
+ for room_id in events_by_room:
+ event_ids = self._simple_select_onecol_txn(
+ txn,
+ table="event_forward_extremities",
+ keyvalues={"room_id": room_id},
+ retcol="event_id",
+ )
+ new_extrem[room_id] = event_ids
+
+ self._simple_insert_many_txn(
+ txn,
+ table="stream_ordering_to_exterm",
+ values=[
+ {
+ "room_id": room_id,
+ "event_id": event_id,
+ "stream_ordering": max_stream_ord,
+ }
+ for room_id, extrem_evs in new_extrem.items()
+ for event_id in extrem_evs
+ ]
+ )
+
query = (
"INSERT INTO event_backward_extremities (event_id, room_id)"
" SELECT ?, ? WHERE NOT EXISTS ("
@@ -305,6 +344,75 @@ class EventFederationStore(SQLBaseStore):
self.get_latest_event_ids_in_room.invalidate, (room_id,)
)
+ def get_forward_extremeties_for_room(self, room_id, stream_ordering):
+ # We want to make the cache more effective, so we clamp to the last
+ # change before the given ordering.
+ last_change = self._events_stream_cache.get_max_pos_of_last_change(room_id)
+
+ # We don't always have a full stream_ordering_to_exterm table, e.g.
+ # after the upgrade that introduced it, so we make sure we never ask
+ # for a stream_ordering from before a restart.
+ last_change = max(self._stream_order_on_start, last_change)
+
+ if last_change > self.stream_ordering_month_ago:
+ stream_ordering = min(last_change, stream_ordering)
+
+ return self._get_forward_extremeties_for_room(room_id, stream_ordering)
+
+ @cached(max_entries=5000, num_args=2)
+ def _get_forward_extremeties_for_room(self, room_id, stream_ordering):
+ """For a given room_id and stream_ordering, return the forward
+ extremities of the room at that point in "time".
+
+ Throws a StoreError if we have since purged the index for
+ stream_orderings from that point.
+ """
+
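+ # Entries older than a month are pruned by
+ # _delete_old_forward_extrem_cache, so we can't answer queries for
+ # stream orderings from before that horizon.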
+ if stream_ordering <= self.stream_ordering_month_ago:
+ raise StoreError(400, "stream_ordering too old")
+
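+ # For this room, pick the forward extremities recorded at the most
+ # recent snapshot taken at or before the requested stream_ordering.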
+ sql = ("""
+ SELECT event_id FROM stream_ordering_to_exterm
+ INNER JOIN (
+ SELECT room_id, MAX(stream_ordering) AS stream_ordering
+ FROM stream_ordering_to_exterm
+ WHERE stream_ordering <= ? GROUP BY room_id
+ ) AS rms USING (room_id, stream_ordering)
+ WHERE room_id = ?
+ """)
+
+ def get_forward_extremeties_for_room_txn(txn):
+ txn.execute(sql, (stream_ordering, room_id))
+ rows = txn.fetchall()
+ return [event_id for event_id, in rows]
+
+ return self.runInteraction(
+ "get_forward_extremeties_for_room",
+ get_forward_extremeties_for_room_txn
+ )
+
+ def _delete_old_forward_extrem_cache(self):
+ def _delete_old_forward_extrem_cache_txn(txn):
+ # Delete entries older than a month, while making sure we never
+ # delete a room's only remaining entries.
+ sql = ("""
+ DELETE FROM stream_ordering_to_exterm
+ WHERE
+ room_id IN (
+ SELECT room_id
+ FROM stream_ordering_to_exterm
+ WHERE stream_ordering > ?
+ ) AND stream_ordering < ?
+ """)
+ txn.execute(
+ sql,
+ (self.stream_ordering_month_ago, self.stream_ordering_month_ago,)
+ )
+ return self.runInteraction(
+ "_delete_old_forward_extrem_cache",
+ _delete_old_forward_extrem_cache_txn
+ )
+
def get_backfill_events(self, room_id, event_list, limit):
"""Get a list of Events for a given topic that occurred before (and
including) the events in event_list. Return a list of max size `limit`
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 65b3775f1f..49aeb953bd 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1354,39 +1354,53 @@ class EventsStore(SQLBaseStore):
min_stream_id = rows[-1][0]
event_ids = [row[1] for row in rows]
- events = self._get_events_txn(txn, event_ids)
+ rows_to_update = []
- rows = []
- for event in events:
- try:
- event_id = event.event_id
- origin_server_ts = event.origin_server_ts
- except (KeyError, AttributeError):
- # If the event is missing a necessary field then
- # skip over it.
- continue
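+ # Fetch the raw event json in chunks of 100 so that each SELECT's
+ # IN (...) list stays a manageable size.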
+ chunks = [
+ event_ids[i:i + 100]
+ for i in xrange(0, len(event_ids), 100)
+ ]
+ for chunk in chunks:
+ ev_rows = self._simple_select_many_txn(
+ txn,
+ table="event_json",
+ column="event_id",
+ iterable=chunk,
+ retcols=["event_id", "json"],
+ keyvalues={},
+ )
- rows.append((origin_server_ts, event_id))
+ for row in ev_rows:
+ event_id = row["event_id"]
+ event_json = json.loads(row["json"])
+ try:
+ origin_server_ts = event_json["origin_server_ts"]
+ except (KeyError, AttributeError):
+ # If the event is missing a necessary field then
+ # skip over it.
+ continue
+
+ rows_to_update.append((origin_server_ts, event_id))
sql = (
"UPDATE events SET origin_server_ts = ? WHERE event_id = ?"
)
- for index in range(0, len(rows), INSERT_CLUMP_SIZE):
- clump = rows[index:index + INSERT_CLUMP_SIZE]
+ for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE):
+ clump = rows_to_update[index:index + INSERT_CLUMP_SIZE]
txn.executemany(sql, clump)
progress = {
"target_min_stream_id_inclusive": target_min_stream_id,
"max_stream_id_exclusive": min_stream_id,
- "rows_inserted": rows_inserted + len(rows)
+ "rows_inserted": rows_inserted + len(rows_to_update)
}
self._background_update_progress_txn(
txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress
)
- return len(rows)
+ return len(rows_to_update)
result = yield self.runInteraction(
self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 7efbe51cda..08de3cc4c1 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
# Remember to update this number every time a change is made to database
# schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 35
+SCHEMA_VERSION = 36
dir_path = os.path.abspath(os.path.dirname(__file__))
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index 8251f58670..11813b44f6 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -48,15 +48,31 @@ class RoomStore(SQLBaseStore):
StoreError if the room could not be stored.
"""
try:
- yield self._simple_insert(
- "rooms",
- {
- "room_id": room_id,
- "creator": room_creator_user_id,
- "is_public": is_public,
- },
- desc="store_room",
- )
+ def store_room_txn(txn, next_id):
+ self._simple_insert_txn(
+ txn,
+ "rooms",
+ {
+ "room_id": room_id,
+ "creator": room_creator_user_id,
+ "is_public": is_public,
+ },
+ )
+ if is_public:
+ self._simple_insert_txn(
+ txn,
+ table="public_room_list_stream",
+ values={
+ "stream_id": next_id,
+ "room_id": room_id,
+ "visibility": is_public,
+ }
+ )
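+ # get_next() is a context manager; the new stream id only becomes
+ # visible to readers of the stream once the block exits.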
+ with self._public_room_id_gen.get_next() as next_id:
+ yield self.runInteraction(
+ "store_room_txn",
+ store_room_txn, next_id,
+ )
except Exception as e:
logger.error("store_room with room_id=%s failed: %s", room_id, e)
raise StoreError(500, "Problem creating room.")
@@ -77,13 +93,45 @@ class RoomStore(SQLBaseStore):
allow_none=True,
)
+ @defer.inlineCallbacks
def set_room_is_public(self, room_id, is_public):
- return self._simple_update_one(
- table="rooms",
- keyvalues={"room_id": room_id},
- updatevalues={"is_public": is_public},
- desc="set_room_is_public",
- )
+ def set_room_is_public_txn(txn, next_id):
+ self._simple_update_one_txn(
+ txn,
+ table="rooms",
+ keyvalues={"room_id": room_id},
+ updatevalues={"is_public": is_public},
+ )
+
+ entries = self._simple_select_list_txn(
+ txn,
+ table="public_room_list_stream",
+ keyvalues={"room_id": room_id},
+ retcols=("stream_id", "visibility"),
+ )
+
+ entries.sort(key=lambda r: r["stream_id"])
+
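+ # Only add a new entry to the stream if the visibility has actually
+ # changed since the most recent entry for this room.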
+ add_to_stream = True
+ if entries:
+ add_to_stream = bool(entries[-1]["visibility"]) != is_public
+
+ if add_to_stream:
+ self._simple_insert_txn(
+ txn,
+ table="public_room_list_stream",
+ values={
+ "stream_id": next_id,
+ "room_id": room_id,
+ "visibility": is_public,
+ }
+ )
+
+ with self._public_room_id_gen.get_next() as next_id:
+ yield self.runInteraction(
+ "set_room_is_public",
+ set_room_is_public_txn, next_id,
+ )
def get_public_room_ids(self):
return self._simple_select_onecol(
@@ -207,3 +255,74 @@ class RoomStore(SQLBaseStore):
},
desc="add_event_report"
)
+
+ def get_current_public_room_stream_id(self):
+ return self._public_room_id_gen.get_current_token()
+
+ def get_public_room_ids_at_stream_id(self, stream_id):
+ return self.runInteraction(
+ "get_public_room_ids_at_stream_id",
+ self.get_public_room_ids_at_stream_id_txn, stream_id
+ )
+
+ def get_public_room_ids_at_stream_id_txn(self, txn, stream_id):
+ return {
+ rm
+ for rm, vis in self.get_published_at_stream_id_txn(txn, stream_id).items()
+ if vis
+ }
+
+ def get_published_at_stream_id_txn(self, txn, stream_id):
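+ # For each room, take the visibility from its most recent entry in
+ # the stream at or before the given stream_id.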
+ sql = ("""
+ SELECT room_id, visibility FROM public_room_list_stream
+ INNER JOIN (
+ SELECT room_id, max(stream_id) AS stream_id
+ FROM public_room_list_stream
+ WHERE stream_id <= ?
+ GROUP BY room_id
+ ) grouped USING (room_id, stream_id)
+ """)
+
+ txn.execute(sql, (stream_id,))
+ return dict(txn.fetchall())
+
+ def get_public_room_changes(self, prev_stream_id, new_stream_id):
+ def get_public_room_changes_txn(txn):
+ then_rooms = self.get_public_room_ids_at_stream_id_txn(txn, prev_stream_id)
+
+ now_rooms_dict = self.get_published_at_stream_id_txn(txn, new_stream_id)
+
+ now_rooms_visible = set(
+ rm for rm, vis in now_rooms_dict.items() if vis
+ )
+ now_rooms_not_visible = set(
+ rm for rm, vis in now_rooms_dict.items() if not vis
+ )
+
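+ # Rooms visible now but absent from the old snapshot were newly
+ # published; rooms explicitly not visible now that were in the old
+ # snapshot were newly unpublished.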
+ newly_visible = now_rooms_visible - then_rooms
+ newly_unpublished = now_rooms_not_visible & then_rooms
+
+ return newly_visible, newly_unpublished
+
+ return self.runInteraction(
+ "get_public_room_changes", get_public_room_changes_txn
+ )
+
+ def get_all_new_public_rooms(self, prev_id, current_id, limit):
+ def get_all_new_public_rooms_txn(txn):
+ sql = ("""
+ SELECT stream_id, room_id, visibility FROM public_room_list_stream
+ WHERE stream_id > ? AND stream_id <= ?
+ ORDER BY stream_id ASC
+ LIMIT ?
+ """)
+
+ txn.execute(sql, (prev_id, current_id, limit,))
+ return txn.fetchall()
+
+ if prev_id == current_id:
+ return defer.succeed([])
+
+ return self.runInteraction(
+ "get_all_new_public_rooms", get_all_new_public_rooms
+ )
diff --git a/synapse/storage/schema/delta/35/public_room_list_change_stream.sql b/synapse/storage/schema/delta/35/public_room_list_change_stream.sql
new file mode 100644
index 0000000000..dd2bf2e28a
--- /dev/null
+++ b/synapse/storage/schema/delta/35/public_room_list_change_stream.sql
@@ -0,0 +1,33 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+CREATE TABLE public_room_list_stream (
+ stream_id BIGINT NOT NULL,
+ room_id TEXT NOT NULL,
+ visibility BOOLEAN NOT NULL
+);
+
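+-- Seed the stream with a single entry (stream_id 1) for each room that
+-- is currently public.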
+INSERT INTO public_room_list_stream (stream_id, room_id, visibility)
+ SELECT 1, room_id, is_public FROM rooms
+ WHERE is_public = CAST(1 AS BOOLEAN);
+
+CREATE INDEX public_room_list_stream_idx on public_room_list_stream(
+ stream_id
+);
+
+CREATE INDEX public_room_list_stream_rm_idx on public_room_list_stream(
+ room_id, stream_id
+);
diff --git a/synapse/storage/schema/delta/35/stream_order_to_extrem.sql b/synapse/storage/schema/delta/35/stream_order_to_extrem.sql
new file mode 100644
index 0000000000..2b945d8a57
--- /dev/null
+++ b/synapse/storage/schema/delta/35/stream_order_to_extrem.sql
@@ -0,0 +1,37 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+CREATE TABLE stream_ordering_to_exterm (
+ stream_ordering BIGINT NOT NULL,
+ room_id TEXT NOT NULL,
+ event_id TEXT NOT NULL
+);
+
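+-- Seed the table with each room's current forward extremities, recorded
+-- against that room's most recent event stream ordering.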
+INSERT INTO stream_ordering_to_exterm (stream_ordering, room_id, event_id)
+ SELECT stream_ordering, room_id, event_id FROM event_forward_extremities
+ INNER JOIN (
+ SELECT room_id, max(stream_ordering) as stream_ordering FROM events
+ INNER JOIN event_forward_extremities USING (room_id, event_id)
+ GROUP BY room_id
+ ) AS rms USING (room_id);
+
+CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm(
+ stream_ordering
+);
+
+CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm(
+ room_id, stream_ordering
+);
diff --git a/synapse/storage/schema/delta/36/readd_public_rooms.sql b/synapse/storage/schema/delta/36/readd_public_rooms.sql
new file mode 100644
index 0000000000..90d8fd18f9
--- /dev/null
+++ b/synapse/storage/schema/delta/36/readd_public_rooms.sql
@@ -0,0 +1,26 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Re-add some entries to stream_ordering_to_exterm that were incorrectly deleted
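+-- Only rooms with no surviving entries at all are re-added, each
+-- extremity keyed by its event's stream ordering.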
+INSERT INTO stream_ordering_to_exterm (stream_ordering, room_id, event_id)
+ SELECT
+ (SELECT stream_ordering FROM events WHERE event_id = e.event_id) AS stream_ordering,
+ room_id,
+ event_id
+ FROM event_forward_extremities AS e
+ WHERE NOT EXISTS (
+ SELECT room_id FROM stream_ordering_to_exterm AS s
+ WHERE s.room_id = e.room_id
+ );
diff --git a/synapse/storage/schema/delta/36/remove_auth_idx.py b/synapse/storage/schema/delta/36/remove_auth_idx.py
index deac8ea39f..4be72a86bc 100644
--- a/synapse/storage/schema/delta/36/remove_auth_idx.py
+++ b/synapse/storage/schema/delta/36/remove_auth_idx.py
@@ -13,7 +13,7 @@
# limitations under the License.
from synapse.storage.prepare_database import get_statements
-from synapse.storage.engines import PostgresEngine, Sqlite3Engine
+from synapse.storage.engines import PostgresEngine
import logging
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index fdbdade536..49abf0ac74 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -307,6 +307,9 @@ class StateStore(SQLBaseStore):
def _get_state_groups_from_groups_txn(self, txn, groups, types=None):
results = {group: {} for group in groups}
+ if types is not None:
+ types = list(set(types)) # deduplicate types list
+
if isinstance(self.database_engine, PostgresEngine):
# Temporarily disable sequential scans in this transaction. This is
# a temporary hack until we can add the right indices in
@@ -375,10 +378,35 @@ class StateStore(SQLBaseStore):
# We don't use WITH RECURSIVE on sqlite3 as there are distributions
# that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
for group in groups:
- group_tree = [group]
next_group = group
while next_group:
+ # We used to do this by fetching the full list of group ids and
+ # passing it to sqlite in one query to get the latest event for
+ # each (type, state_key). However, that was terribly slow
+ # without the right indices (which we can't add until
+ # after we finish deduping state, which requires this function)
+ args = [next_group]
+ if types:
+ args.extend(i for typ in types for i in typ)
+
+ txn.execute(
+ "SELECT type, state_key, event_id FROM state_groups_state"
+ " WHERE state_group = ? %s" % (where_clause,),
+ args
+ )
+ rows = txn.fetchall()
+ results[group].update({
+ (typ, state_key): event_id
+ for typ, state_key, event_id in rows
+ if (typ, state_key) not in results[group]
+ })
+
+ # If the lengths match then we must have all the types,
+ # so there is no need to walk further down the tree.
+ if types is not None and len(results[group]) == len(types):
+ break
+
next_group = self._simple_select_one_onecol_txn(
txn,
table="state_group_edges",
@@ -386,28 +414,6 @@ class StateStore(SQLBaseStore):
retcol="prev_state_group",
allow_none=True,
)
- if next_group:
- group_tree.append(next_group)
-
- sql = ("""
- SELECT type, state_key, event_id FROM state_groups_state
- INNER JOIN (
- SELECT type, state_key, max(state_group) as state_group
- FROM state_groups_state
- WHERE state_group IN (%s) %s
- GROUP BY type, state_key
- ) USING (type, state_key, state_group);
- """) % (",".join("?" for _ in group_tree), where_clause,)
-
- args = list(group_tree)
- if types is not None:
- args.extend([i for typ in types for i in typ])
-
- txn.execute(sql, args)
- rows = self.cursor_to_dict(txn)
- for row in rows:
- key = (row["type"], row["state_key"])
- results[group][key] = row["event_id"]
return results
@@ -817,16 +823,24 @@ class StateStore(SQLBaseStore):
@defer.inlineCallbacks
def _background_index_state(self, progress, batch_size):
- def reindex_txn(txn):
+ def reindex_txn(conn):
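+ # The connection may be inside a transaction started by the
+ # connection pool, so roll it back before changing session settings.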
+ conn.rollback()
if isinstance(self.database_engine, PostgresEngine):
- txn.execute(
- "CREATE INDEX CONCURRENTLY state_groups_state_type_idx"
- " ON state_groups_state(state_group, type, state_key)"
- )
- txn.execute(
- "DROP INDEX IF EXISTS state_groups_state_id"
- )
+ # CREATE INDEX CONCURRENTLY cannot run inside a transaction, so
+ # postgres insists on autocommit here
+ conn.set_session(autocommit=True)
+ try:
+ txn = conn.cursor()
+ txn.execute(
+ "CREATE INDEX CONCURRENTLY state_groups_state_type_idx"
+ " ON state_groups_state(state_group, type, state_key)"
+ )
+ txn.execute(
+ "DROP INDEX IF EXISTS state_groups_state_id"
+ )
+ finally:
+ conn.set_session(autocommit=False)
else:
+ txn = conn.cursor()
txn.execute(
"CREATE INDEX state_groups_state_type_idx"
" ON state_groups_state(state_group, type, state_key)"
@@ -835,9 +849,7 @@ class StateStore(SQLBaseStore):
"DROP INDEX IF EXISTS state_groups_state_id"
)
- yield self.runInteraction(
- self.STATE_GROUP_INDEX_UPDATE_NAME, reindex_txn
- )
+ yield self.runWithConnection(reindex_txn)
yield self._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME)
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 0577a0525b..07ea969d4d 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -531,6 +531,9 @@ class StreamStore(SQLBaseStore):
)
defer.returnValue("t%d-%d" % (topo, token))
+ def get_room_max_stream_ordering(self):
+ return self._stream_id_gen.get_current_token()
+
def get_stream_token_for_event(self, event_id):
"""The stream token for an event
Args:
|