diff --git a/synapse/storage/schema/delta/56/stats_separated1.sql b/synapse/storage/schema/delta/56/stats_separated1.sql
index 26606b2b60..4c4a605e96 100644
--- a/synapse/storage/schema/delta/56/stats_separated1.sql
+++ b/synapse/storage/schema/delta/56/stats_separated1.sql
@@ -61,7 +61,7 @@ INSERT INTO stats_incremental_position (
total_events_min_stream_ordering,
total_events_max_stream_ordering,
is_background_contract
-) VALUES (NULL, NULL, NULL, FALSE), (NULL, NULL, NULL, TRUE);
+) VALUES (NULL, NULL, NULL, (0 = 1)), (NULL, NULL, NULL, (1 = 1));
-- represents PRESENT room statistics for a room
CREATE TABLE IF NOT EXISTS room_stats_current (
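The change above swaps the TRUE/FALSE literals for comparison expressions. SQLite only learned to parse the TRUE and FALSE keywords in 3.23.0, whereas (0 = 1) and (1 = 1) evaluate to 0/1 on every SQLite version (and to real booleans on PostgreSQL), so they are safe in a schema file shared by both engines. A minimal standalone sketch of the behaviour, using only the stdlib sqlite3 module:

import sqlite3

# (0 = 1) and (1 = 1) act as portable FALSE/TRUE stand-ins, even on
# SQLite versions that predate the TRUE/FALSE keywords (added in 3.23.0).
conn = sqlite3.connect(":memory:")
print(conn.execute("SELECT (0 = 1), (1 = 1)").fetchone())  # -> (0, 1)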
@@ -102,14 +102,15 @@ CREATE TABLE IF NOT EXISTS room_stats_historical (
-- Note that end_ts is quantised, and start_ts usually is too.
end_ts BIGINT NOT NULL,
bucket_size INT NOT NULL,
- PRIMARY KEY (room_id, end_ts),
current_state_events INT NOT NULL,
total_events INT NOT NULL,
joined_members INT NOT NULL,
invited_members INT NOT NULL,
left_members INT NOT NULL,
- banned_members INT NOT NULL
+ banned_members INT NOT NULL,
+
+ PRIMARY KEY (room_id, end_ts)
);
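Both PRIMARY KEY moves in this file appear to be for SQLite's benefit: its CREATE TABLE grammar requires every column definition to come before any table-level constraint, while PostgreSQL accepts them interleaved. A minimal sketch of the failure mode (hypothetical table name):

import sqlite3

conn = sqlite3.connect(":memory:")
try:
    # A table-level PRIMARY KEY placed in the middle of the column list,
    # as in the pre-fix schema: SQLite rejects this outright.
    conn.execute(
        "CREATE TABLE t (a TEXT NOT NULL,"
        " PRIMARY KEY (a, b),"
        " b BIGINT NOT NULL)"
    )
except sqlite3.OperationalError as e:
    print("rejected:", e)  # near "b": syntax error
# Moved to the end, after all column definitions, the same table is accepted:
conn.execute(
    "CREATE TABLE t (a TEXT NOT NULL, b BIGINT NOT NULL, PRIMARY KEY (a, b))"
)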
-- We use this index to speed up deletion of old room stats.
@@ -148,10 +149,11 @@ CREATE TABLE IF NOT EXISTS user_stats_historical (
user_id TEXT NOT NULL,
end_ts BIGINT NOT NULL,
bucket_size INT NOT NULL,
- PRIMARY KEY (user_id, end_ts),
public_rooms INT NOT NULL,
- private_rooms INT NOT NULL
+ private_rooms INT NOT NULL,
+
+ PRIMARY KEY (user_id, end_ts)
);
-- We use this index to speed up deletion of old user stats.
@@ -160,8 +162,6 @@ CREATE INDEX IF NOT EXISTS user_stats_historical_end_ts ON user_stats_historical
-- We don't need an index on (user_id, end_ts) because the PRIMARY KEY covers
-- that. (We would otherwise want one to review stats for a particular user.)
--- TODO old SQLites may not support partial indices
-
-- Set up staging tables
-- we depend on current_state_events_membership because this is used
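The dropped TODO about partial indices dates the concern: SQLite has supported partial indexes since 3.8.0 (2013), so the schema can rely on them. For illustration, a sketch of the kind of partial index the later "end_ts IS NOT NULL" predicate is written to match (hypothetical index and table names; the real index lives elsewhere in the schema):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE room_stats_current (room_id TEXT, end_ts BIGINT)")
# A partial index only covers rows matching its WHERE clause; queries must
# repeat a compatible predicate (e.g. "end_ts IS NOT NULL") for the planner
# to consider using it.
conn.execute(
    "CREATE INDEX room_stats_dirty ON room_stats_current (end_ts)"
    " WHERE end_ts IS NOT NULL"
)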
diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py
index 8927e5f6c4..b65eef72bd 100644
--- a/synapse/storage/stats.py
+++ b/synapse/storage/stats.py
@@ -290,7 +290,7 @@ class StatsStore(StateDeltasStore):
yield self._end_background_update("populate_stats_process_users")
defer.returnValue(1)
- for user_id in users_to_work_on:
+ for (user_id,) in users_to_work_on:
now = self.hs.get_reactor().seconds()
def _process_user(txn):
@@ -430,7 +430,7 @@ class StatsStore(StateDeltasStore):
yield self._end_background_update("populate_stats_process_rooms")
defer.returnValue(1)
- for room_id in rooms_to_work_on:
+ for (room_id,) in rooms_to_work_on:
current_state_ids = yield self.get_current_state_ids(room_id)
join_rules_id = current_state_ids.get((EventTypes.JoinRules, ""))
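Both loop fixes above address the same mistake: the rows these loops iterate over come back from the database as tuples, so a single-column SELECT yields 1-tuples rather than bare IDs, and the old code bound the whole tuple to user_id/room_id. A minimal sketch with plain sqlite3 (not Synapse's transaction helpers):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (user_id TEXT)")
conn.execute("INSERT INTO users VALUES ('@alice:example.com')")
rows = conn.execute("SELECT user_id FROM users").fetchall()
for user_id in rows:
    print(user_id)   # ('@alice:example.com',) -- the 1-tuple, not the ID
for (user_id,) in rows:
    print(user_id)   # @alice:example.com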
@@ -900,11 +900,19 @@ class StatsStore(StateDeltasStore):
)
# `end_ts IS NOT NULL` is for partial index optimisation
- sql = (
- "SELECT %s FROM %s_current"
- " WHERE end_ts <= ? AND end_ts IS NOT NULL"
- " LIMIT %d FOR UPDATE"
- ) % (id_col, table, limit)
+ if isinstance(self.database_engine, Sqlite3Engine):
+ # SQLite doesn't support SELECT FOR UPDATE
+ sql = (
+ "SELECT %s FROM %s_current"
+ " WHERE end_ts <= ? AND end_ts IS NOT NULL"
+ " LIMIT %d"
+ ) % (id_col, table, limit)
+ else:
+ sql = (
+ "SELECT %s FROM %s_current"
+ " WHERE end_ts <= ? AND end_ts IS NOT NULL"
+ " LIMIT %d FOR UPDATE"
+ ) % (id_col, table, limit)
txn.execute(sql, (quantised_ts,))
maybe_more = txn.rowcount == limit
updates = txn.fetchall()
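PostgreSQL's SELECT ... FOR UPDATE locks the returned rows until the transaction ends; SQLite has no row-level locking (writers take a database-wide lock) and rejects the clause as a syntax error, hence the engine branch above. A minimal sketch of the SQLite side (hypothetical table name):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE room_stats_current (room_id TEXT, end_ts BIGINT)")
try:
    conn.execute("SELECT room_id FROM room_stats_current LIMIT 1 FOR UPDATE")
except sqlite3.OperationalError as e:
    print("rejected:", e)  # near "FOR": syntax error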