summary refs log tree commit diff
path: root/synapse/storage/schema
diff options
context:
space:
mode:
authorErik Johnston <erik@matrix.org>2018-06-05 14:39:35 +0100
committerErik Johnston <erik@matrix.org>2018-06-05 16:40:16 +0100
commit918a5055ff9acb23476a178a74ca3363366504ed (patch)
treea282e75101926b961956b6ebf08837efb11edb26 /synapse/storage/schema
parentImplement background update for chunks (diff)
downloadsynapse-github/erikj/chunks_stern.tar.xz
Use fractions for ordering of chunks github/erikj/chunks_stern erikj/chunks_stern
Using floats turned out to be a bad idea, as it broke subtly if the
needed precision was too large. This PR replaces the implementation with
one that uses fractions and stores them in the database as two integers.
Diffstat (limited to 'synapse/storage/schema')
-rw-r--r--synapse/storage/schema/delta/49/event_chunks.py47
1 files changed, 39 insertions, 8 deletions
diff --git a/synapse/storage/schema/delta/49/event_chunks.py b/synapse/storage/schema/delta/49/event_chunks.py

index 7d8d711600..50040a779c 100644 --- a/synapse/storage/schema/delta/49/event_chunks.py +++ b/synapse/storage/schema/delta/49/event_chunks.py
@@ -53,11 +53,23 @@ CREATE INDEX chunk_backwards_extremities_event_id ON chunk_backwards_extremities CREATE TABLE chunk_linearized ( chunk_id BIGINT NOT NULL, room_id TEXT NOT NULL, - ordering DOUBLE PRECISION NOT NULL + next_chunk_id BIGINT, -- The chunk directly after this chunk, or NULL if last chunk + numerator BIGINT NOT NULL, + denominator BIGINT NOT NULL ); CREATE UNIQUE INDEX chunk_linearized_id ON chunk_linearized (chunk_id); -CREATE INDEX chunk_linearized_ordering ON chunk_linearized (room_id, ordering); +CREATE UNIQUE INDEX chunk_linearized_next_id ON chunk_linearized ( + next_chunk_id, room_id +); + +-- Records the first chunk in a room. +CREATE TABLE chunk_linearized_first ( + chunk_id BIGINT NOT NULL, + room_id TEXT NOT NULL +); + +CREATE UNIQUE INDEX chunk_linearized_first_id ON chunk_linearized_first (room_id); INSERT into background_updates (update_name, progress_json) VALUES ('event_fields_chunk_id', '{}'); @@ -69,10 +81,6 @@ def run_create(cur, database_engine, *args, **kwargs): for statement in get_statements(SQL.splitlines()): cur.execute(statement) - # We now go through and assign chunk IDs for all forward extremities. - # Note that we know that extremities can't reference each other, so we - # can simply assign each event a new chunk ID with an arbitrary order. 
- txn = LoggingTransaction( cur, "schema_update", database_engine, [], [], ) @@ -86,6 +94,7 @@ def run_create(cur, database_engine, *args, **kwargs): next_chunk_id = 1 room_to_next_order = {} + prev_chunks_by_room = {} for row in rows: chunk_id = next_chunk_id @@ -101,19 +110,41 @@ def run_create(cur, database_engine, *args, **kwargs): updatevalues={"chunk_id": chunk_id}, ) - ordering = room_to_next_order.get(room_id, 0) + ordering = room_to_next_order.get(room_id, 1) room_to_next_order[room_id] = ordering + 1 + prev_chunks = prev_chunks_by_room.setdefault(room_id, []) + SQLBaseStore._simple_insert_txn( txn, table="chunk_linearized", values={ "chunk_id": chunk_id, "room_id": row["room_id"], - "ordering": 0, + "numerator": ordering, + "denominator": 1, }, ) + if prev_chunks: + SQLBaseStore._simple_update_one_txn( + txn, + table="chunk_linearized", + keyvalues={"chunk_id": prev_chunks[-1]}, + updatevalues={"next_chunk_id": chunk_id}, + ) + else: + SQLBaseStore._simple_insert_txn( + txn, + table="chunk_linearized_first", + values={ + "chunk_id": chunk_id, + "room_id": row["room_id"], + }, + ) + + prev_chunks.append(chunk_id) + def run_upgrade(*args, **kwargs): pass