diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index e7443f2838..c46b653f11 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -42,6 +42,7 @@ from .end_to_end_keys import EndToEndKeyStore
from .receipts import ReceiptsStore
from .search import SearchStore
from .tags import TagsStore
+from .account_data import AccountDataStore
import logging
@@ -73,6 +74,7 @@ class DataStore(RoomMemberStore, RoomStore,
EndToEndKeyStore,
SearchStore,
TagsStore,
+ AccountDataStore,
):
def __init__(self, hs):
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 218e708054..17a14e001c 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -214,7 +214,8 @@ class SQLBaseStore(object):
self._clock.looping_call(loop, 10000)
- def _new_transaction(self, conn, desc, after_callbacks, func, *args, **kwargs):
+ def _new_transaction(self, conn, desc, after_callbacks, logging_context,
+ func, *args, **kwargs):
start = time.time() * 1000
txn_id = self._TXN_ID
@@ -277,6 +278,9 @@ class SQLBaseStore(object):
end = time.time() * 1000
duration = end - start
+ if logging_context is not None:
+ logging_context.add_database_transaction(duration)
+
transaction_logger.debug("[TXN END] {%s} %f", name, duration)
self._current_txn_total_time += duration
@@ -302,7 +306,8 @@ class SQLBaseStore(object):
current_context.copy_to(context)
return self._new_transaction(
- conn, desc, after_callbacks, func, *args, **kwargs
+ conn, desc, after_callbacks, current_context,
+ func, *args, **kwargs
)
result = yield preserve_context_over_fn(
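
For illustration, a minimal sketch of the per-request accounting this change
enables. SimpleContext is an illustrative stand-in, not Synapse's
LoggingContext; the only behaviour assumed from the diff above is the
add_database_transaction(duration) hook that _new_transaction now calls.

    class SimpleContext(object):
        """Accumulates database time attributed to one request."""
        def __init__(self):
            self.db_txn_count = 0
            self.db_txn_duration_ms = 0.0

        def add_database_transaction(self, duration_ms):
            self.db_txn_count += 1
            self.db_txn_duration_ms += duration_ms

    ctx = SimpleContext()
    ctx.add_database_transaction(12.5)  # as _new_transaction would call it
    ctx.add_database_transaction(3.0)
    assert ctx.db_txn_count == 2 and ctx.db_txn_duration_ms == 15.5
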
diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py
new file mode 100644
index 0000000000..d1829f84e8
--- /dev/null
+++ b/synapse/storage/account_data.py
@@ -0,0 +1,211 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from twisted.internet import defer
+
+import ujson as json
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class AccountDataStore(SQLBaseStore):
+
+ def get_account_data_for_user(self, user_id):
+ """Get all the client account_data for a user.
+
+ Args:
+ user_id(str): The user to get the account_data for.
+ Returns:
+ A deferred pair of a dict of global account_data and a dict
+ mapping from room_id string to per room account_data dicts.
+ """
+
+ def get_account_data_for_user_txn(txn):
+ rows = self._simple_select_list_txn(
+ txn, "account_data", {"user_id": user_id},
+ ["account_data_type", "content"]
+ )
+
+ global_account_data = {
+ row["account_data_type"]: json.loads(row["content"]) for row in rows
+ }
+
+ rows = self._simple_select_list_txn(
+ txn, "room_account_data", {"user_id": user_id},
+ ["room_id", "account_data_type", "content"]
+ )
+
+ by_room = {}
+ for row in rows:
+ room_data = by_room.setdefault(row["room_id"], {})
+ room_data[row["account_data_type"]] = json.loads(row["content"])
+
+ return (global_account_data, by_room)
+
+ return self.runInteraction(
+ "get_account_data_for_user", get_account_data_for_user_txn
+ )
+
+ def get_account_data_for_room(self, user_id, room_id):
+ """Get all the client account_data for a user for a room.
+
+ Args:
+ user_id(str): The user to get the account_data for.
+ room_id(str): The room to get the account_data for.
+ Returns:
+ A deferred dict of the room account_data
+ """
+ def get_account_data_for_room_txn(txn):
+ rows = self._simple_select_list_txn(
+ txn, "room_account_data", {"user_id": user_id, "room_id": room_id},
+ ["account_data_type", "content"]
+ )
+
+ return {
+ row["account_data_type"]: json.loads(row["content"]) for row in rows
+ }
+
+ return self.runInteraction(
+ "get_account_data_for_room", get_account_data_for_room_txn
+ )
+
+ def get_updated_account_data_for_user(self, user_id, stream_id):
+ """Get all the client account_data for a that's changed.
+
+ Args:
+ user_id(str): The user to get the account_data for.
+ stream_id(int): The point in the stream since which to get updates
+ Returns:
+ A deferred pair of a dict of global account_data and a dict
+ mapping from room_id string to per room account_data dicts.
+ """
+
+ def get_updated_account_data_for_user_txn(txn):
+ sql = (
+ "SELECT account_data_type, content FROM account_data"
+ " WHERE user_id = ? AND stream_id > ?"
+ )
+
+ txn.execute(sql, (user_id, stream_id))
+
+ global_account_data = {
+ row[0]: json.loads(row[1]) for row in txn.fetchall()
+ }
+
+ sql = (
+ "SELECT room_id, account_data_type, content FROM room_account_data"
+ " WHERE user_id = ? AND stream_id > ?"
+ )
+
+ txn.execute(sql, (user_id, stream_id))
+
+ account_data_by_room = {}
+ for row in txn.fetchall():
+ room_account_data = account_data_by_room.setdefault(row[0], {})
+ room_account_data[row[1]] = json.loads(row[2])
+
+ return (global_account_data, account_data_by_room)
+
+ return self.runInteraction(
+ "get_updated_account_data_for_user", get_updated_account_data_for_user_txn
+ )
+
+ @defer.inlineCallbacks
+ def add_account_data_to_room(self, user_id, room_id, account_data_type, content):
+ """Add some account_data to a room for a user.
+ Args:
+ user_id(str): The user to add a tag for.
+ room_id(str): The room to add a tag for.
+ account_data_type(str): The type of account_data to add.
+ content(dict): A json object to associate with the tag.
+ Returns:
+ A deferred that completes once the account_data has been added.
+ """
+ content_json = json.dumps(content)
+
+ def add_account_data_txn(txn, next_id):
+ self._simple_upsert_txn(
+ txn,
+ table="room_account_data",
+ keyvalues={
+ "user_id": user_id,
+ "room_id": room_id,
+ "account_data_type": account_data_type,
+ },
+ values={
+ "stream_id": next_id,
+ "content": content_json,
+ }
+ )
+ self._update_max_stream_id(txn, next_id)
+
+ with (yield self._account_data_id_gen.get_next(self)) as next_id:
+ yield self.runInteraction(
+ "add_room_account_data", add_account_data_txn, next_id
+ )
+
+ result = yield self._account_data_id_gen.get_max_token(self)
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def add_account_data_for_user(self, user_id, account_data_type, content):
+ """Add some account_data to a room for a user.
+ Args:
+ user_id(str): The user to add a tag for.
+ account_data_type(str): The type of account_data to add.
+ content(dict): A json object to associate with the tag.
+ Returns:
+ A deferred that completes once the account_data has been added.
+ """
+ content_json = json.dumps(content)
+
+ def add_account_data_txn(txn, next_id):
+ self._simple_upsert_txn(
+ txn,
+ table="account_data",
+ keyvalues={
+ "user_id": user_id,
+ "account_data_type": account_data_type,
+ },
+ values={
+ "stream_id": next_id,
+ "content": content_json,
+ }
+ )
+ self._update_max_stream_id(txn, next_id)
+
+ with (yield self._account_data_id_gen.get_next(self)) as next_id:
+ yield self.runInteraction(
+ "add_user_account_data", add_account_data_txn, next_id
+ )
+
+ result = yield self._account_data_id_gen.get_max_token(self)
+ defer.returnValue(result)
+
+ def _update_max_stream_id(self, txn, next_id):
+ """Update the max stream_id
+
+ Args:
+ txn: The database cursor
+            next_id(int): The revision to advance to.
+ """
+ update_max_id_sql = (
+ "UPDATE account_data_max_stream_id"
+ " SET stream_id = ?"
+ " WHERE stream_id < ?"
+ )
+ txn.execute(update_max_id_sql, (next_id, next_id))
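
A self-contained sketch of the upsert-then-read-since pattern that
AccountDataStore implements, runnable against an in-memory SQLite database.
It mirrors the schema from delta 27 and the stream_id filter in
get_updated_account_data_for_user, not Synapse's transaction machinery; the
"m.example" account_data type is made up.

    import json
    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE account_data("
        " user_id TEXT NOT NULL, account_data_type TEXT NOT NULL,"
        " stream_id BIGINT NOT NULL, content TEXT NOT NULL,"
        " UNIQUE (user_id, account_data_type))"
    )

    def upsert(user_id, data_type, content, stream_id):
        # Equivalent in spirit to _simple_upsert_txn: UPDATE first,
        # INSERT only if no existing row was touched.
        cur = conn.execute(
            "UPDATE account_data SET stream_id = ?, content = ?"
            " WHERE user_id = ? AND account_data_type = ?",
            (stream_id, json.dumps(content), user_id, data_type),
        )
        if cur.rowcount == 0:
            conn.execute(
                "INSERT INTO account_data VALUES (?, ?, ?, ?)",
                (user_id, data_type, stream_id, json.dumps(content)),
            )

    upsert("@alice:test", "m.example", {"a": 1}, 1)
    upsert("@alice:test", "m.example", {"a": 2}, 2)  # bumps the stream_id

    # Only changes past the client's last-seen stream_id come back:
    rows = conn.execute(
        "SELECT account_data_type, content FROM account_data"
        " WHERE user_id = ? AND stream_id > ?",
        ("@alice:test", 1),
    ).fetchall()
    assert rows == [("m.example", '{"a": 2}')]
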
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 5d35ca90b9..fc5725097c 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -51,6 +51,14 @@ EVENT_QUEUE_TIMEOUT_S = 0.1 # Timeout when waiting for requests for events
class EventsStore(SQLBaseStore):
+ EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
+
+ def __init__(self, hs):
+ super(EventsStore, self).__init__(hs)
+ self.register_background_update_handler(
+ self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts
+ )
+
@defer.inlineCallbacks
def persist_events(self, events_and_contexts, backfilled=False,
is_new_state=True):
@@ -365,6 +373,7 @@ class EventsStore(SQLBaseStore):
"processed": True,
"outlier": event.internal_metadata.is_outlier(),
"content": encode_json(event.content).decode("UTF-8"),
+ "origin_server_ts": int(event.origin_server_ts),
}
for event, _ in events_and_contexts
],
@@ -640,7 +649,7 @@ class EventsStore(SQLBaseStore):
]
rows = self._new_transaction(
- conn, "do_fetch", [], self._fetch_event_rows, event_ids
+ conn, "do_fetch", [], None, self._fetch_event_rows, event_ids
)
row_dict = {
@@ -964,3 +973,71 @@ class EventsStore(SQLBaseStore):
ret = yield self.runInteraction("count_messages", _count_messages)
defer.returnValue(ret)
+
+ @defer.inlineCallbacks
+ def _background_reindex_origin_server_ts(self, progress, batch_size):
+ target_min_stream_id = progress["target_min_stream_id_inclusive"]
+ max_stream_id = progress["max_stream_id_exclusive"]
+ rows_inserted = progress.get("rows_inserted", 0)
+
+ INSERT_CLUMP_SIZE = 1000
+
+ def reindex_search_txn(txn):
+ sql = (
+ "SELECT stream_ordering, event_id FROM events"
+ " WHERE ? <= stream_ordering AND stream_ordering < ?"
+ " ORDER BY stream_ordering DESC"
+ " LIMIT ?"
+ )
+
+ txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
+
+ rows = txn.fetchall()
+ if not rows:
+ return 0
+
+ min_stream_id = rows[-1][0]
+ event_ids = [row[1] for row in rows]
+
+ events = self._get_events_txn(txn, event_ids)
+
+ rows = []
+ for event in events:
+ try:
+ event_id = event.event_id
+ origin_server_ts = event.origin_server_ts
+ except (KeyError, AttributeError):
+ # If the event is missing a necessary field then
+ # skip over it.
+ continue
+
+ rows.append((origin_server_ts, event_id))
+
+ sql = (
+ "UPDATE events SET origin_server_ts = ? WHERE event_id = ?"
+ )
+
+ for index in range(0, len(rows), INSERT_CLUMP_SIZE):
+ clump = rows[index:index + INSERT_CLUMP_SIZE]
+ txn.executemany(sql, clump)
+
+ progress = {
+ "target_min_stream_id_inclusive": target_min_stream_id,
+ "max_stream_id_exclusive": min_stream_id,
+ "rows_inserted": rows_inserted + len(rows)
+ }
+
+ self._background_update_progress_txn(
+ txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress
+ )
+
+ return len(rows)
+
+ result = yield self.runInteraction(
+ self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
+ )
+
+ if not result:
+ yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME)
+
+ defer.returnValue(result)
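
For reference, a self-contained sketch of the background-update contract that
_background_reindex_origin_server_ts follows: each call handles up to
batch_size rows from the top of the remaining stream range, records new
progress, and returns the number handled, with a falsy result ending the
update. The in-memory list is an illustrative stand-in for the events table.

    stream_orderings = range(1, 26)  # stand-in for events.stream_ordering

    def run_batch(progress, batch_size):
        lo = progress["target_min_stream_id_inclusive"]
        hi = progress["max_stream_id_exclusive"]
        # Highest first, mirroring ORDER BY stream_ordering DESC above.
        batch = [s for s in reversed(stream_orderings) if lo <= s < hi]
        batch = batch[:batch_size]
        if not batch:
            return 0, progress
        # The smallest row handled becomes the next exclusive upper bound.
        return len(batch), dict(progress, max_stream_id_exclusive=batch[-1])

    progress = {"target_min_stream_id_inclusive": 1,
                "max_stream_id_exclusive": 26}
    handled = True
    while handled:
        handled, progress = run_batch(progress, 10)
    # A zero return is the point where _end_background_update would run.
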
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 9800fd4203..16eff62544 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
# Remember to update this number every time a change is made to database
# schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 26
+SCHEMA_VERSION = 27
dir_path = os.path.abspath(os.path.dirname(__file__))
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 2e5eddd259..09a05b08ef 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -258,10 +258,10 @@ class RegistrationStore(SQLBaseStore):
@defer.inlineCallbacks
def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
yield self._simple_upsert("user_threepids", {
- "user_id": user_id,
"medium": medium,
"address": address,
}, {
+ "user_id": user_id,
"validated_at": validated_at,
"added_at": added_at,
})
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index ae1ad56d9a..4e0e9ab59a 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -18,7 +18,7 @@ from twisted.internet import defer
from collections import namedtuple
from ._base import SQLBaseStore
-from synapse.util.caches.descriptors import cached
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
from synapse.api.constants import Membership
from synapse.types import UserID
@@ -121,7 +121,7 @@ class RoomMemberStore(SQLBaseStore):
return self.get_rooms_for_user_where_membership_is(
user_id, [Membership.INVITE]
).addCallback(lambda invites: self._get_events([
- invites.event_id for invite in invites
+ invite.event_id for invite in invites
]))
def get_leave_and_ban_events_for_user(self, user_id):
@@ -160,7 +160,7 @@ class RoomMemberStore(SQLBaseStore):
def _get_rooms_for_user_where_membership_is_txn(self, txn, user_id,
membership_list):
- where_clause = "user_id = ? AND (%s)" % (
+ where_clause = "user_id = ? AND (%s) AND forgotten = 0" % (
" OR ".join(["membership = ?" for _ in membership_list]),
)
@@ -269,3 +269,70 @@ class RoomMemberStore(SQLBaseStore):
ret = len(room_id_lists.pop(0).intersection(*room_id_lists)) > 0
defer.returnValue(ret)
+
+ @defer.inlineCallbacks
+ def forget(self, user_id, room_id):
+ """Indicate that user_id wishes to discard history for room_id."""
+ def f(txn):
+ sql = (
+ "UPDATE"
+ " room_memberships"
+ " SET"
+ " forgotten = 1"
+ " WHERE"
+ " user_id = ?"
+ " AND"
+ " room_id = ?"
+ )
+ txn.execute(sql, (user_id, room_id))
+ yield self.runInteraction("forget_membership", f)
+ self.was_forgotten_at.invalidate_all()
+ self.did_forget.invalidate((user_id, room_id))
+
+ @cachedInlineCallbacks(num_args=2)
+ def did_forget(self, user_id, room_id):
+ """Returns whether user_id has elected to discard history for room_id.
+
+ Returns False if they have since re-joined."""
+ def f(txn):
+ sql = (
+ "SELECT"
+ " COUNT(*)"
+ " FROM"
+ " room_memberships"
+ " WHERE"
+ " user_id = ?"
+ " AND"
+ " room_id = ?"
+ " AND"
+ " forgotten = 0"
+ )
+ txn.execute(sql, (user_id, room_id))
+ rows = txn.fetchall()
+ return rows[0][0]
+ count = yield self.runInteraction("did_forget_membership", f)
+ defer.returnValue(count == 0)
+
+ @cachedInlineCallbacks(num_args=3)
+ def was_forgotten_at(self, user_id, room_id, event_id):
+ """Returns whether user_id has elected to discard history for room_id at event_id.
+
+ event_id must be a membership event."""
+ def f(txn):
+ sql = (
+ "SELECT"
+ " forgotten"
+ " FROM"
+ " room_memberships"
+ " WHERE"
+ " user_id = ?"
+ " AND"
+ " room_id = ?"
+ " AND"
+ " event_id = ?"
+ )
+ txn.execute(sql, (user_id, room_id, event_id))
+ rows = txn.fetchall()
+ return rows[0][0]
+ forgot = yield self.runInteraction("did_forget_membership_at", f)
+ defer.returnValue(forgot == 1)
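
A runnable sketch of the forgotten-flag semantics introduced above, using an
in-memory SQLite table with only the columns the new queries touch.
did_forget counts un-forgotten membership rows, so a later re-join (a fresh
row with forgotten = 0) flips the answer back to False.

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE room_memberships("
        " user_id TEXT, room_id TEXT, event_id TEXT,"
        " forgotten INTEGER DEFAULT 0)"
    )
    conn.execute(
        "INSERT INTO room_memberships VALUES ('@u:test', '!r:test', '$e1', 0)"
    )

    def did_forget(user_id, room_id):
        (count,) = conn.execute(
            "SELECT COUNT(*) FROM room_memberships"
            " WHERE user_id = ? AND room_id = ? AND forgotten = 0",
            (user_id, room_id),
        ).fetchone()
        return count == 0

    conn.execute(
        "UPDATE room_memberships SET forgotten = 1"
        " WHERE user_id = ? AND room_id = ?",
        ("@u:test", "!r:test"),
    )
    assert did_forget("@u:test", "!r:test")

    # Re-joining inserts a new, un-forgotten membership row:
    conn.execute(
        "INSERT INTO room_memberships VALUES ('@u:test', '!r:test', '$e2', 0)"
    )
    assert not did_forget("@u:test", "!r:test")
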
diff --git a/synapse/storage/schema/delta/15/v15.sql b/synapse/storage/schema/delta/15/v15.sql
index f5b2a08ca4..9523d2bcc3 100644
--- a/synapse/storage/schema/delta/15/v15.sql
+++ b/synapse/storage/schema/delta/15/v15.sql
@@ -1,23 +1,22 @@
-- Drop, copy & recreate pushers table to change unique key
-- Also add access_token column at the same time
CREATE TABLE IF NOT EXISTS pushers2 (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
+ id BIGINT PRIMARY KEY,
user_name TEXT NOT NULL,
- access_token INTEGER DEFAULT NULL,
- profile_tag varchar(32) NOT NULL,
- kind varchar(8) NOT NULL,
- app_id varchar(64) NOT NULL,
- app_display_name varchar(64) NOT NULL,
- device_display_name varchar(128) NOT NULL,
- pushkey blob NOT NULL,
+ access_token BIGINT DEFAULT NULL,
+ profile_tag VARCHAR(32) NOT NULL,
+ kind VARCHAR(8) NOT NULL,
+ app_id VARCHAR(64) NOT NULL,
+ app_display_name VARCHAR(64) NOT NULL,
+ device_display_name VARCHAR(128) NOT NULL,
+ pushkey bytea NOT NULL,
ts BIGINT NOT NULL,
- lang varchar(8),
- data blob,
+ lang VARCHAR(8),
+ data bytea,
last_token TEXT,
last_success BIGINT,
failing_since BIGINT,
- FOREIGN KEY(user_name) REFERENCES users(name),
- UNIQUE (app_id, pushkey, user_name)
+ UNIQUE (app_id, pushkey)
);
INSERT INTO pushers2 (id, user_name, profile_tag, kind, app_id, app_display_name, device_display_name, pushkey, ts, lang, data, last_token, last_success, failing_since)
SELECT id, user_name, profile_tag, kind, app_id, app_display_name, device_display_name, pushkey, ts, lang, data, last_token, last_success, failing_since FROM pushers;
diff --git a/synapse/storage/schema/delta/25/fts.py b/synapse/storage/schema/delta/25/fts.py
index 5239d69073..ba48e43792 100644
--- a/synapse/storage/schema/delta/25/fts.py
+++ b/synapse/storage/schema/delta/25/fts.py
@@ -38,7 +38,7 @@ CREATE INDEX event_search_ev_ridx ON event_search(room_id);
SQLITE_TABLE = (
- "CREATE VIRTUAL TABLE IF NOT EXISTS event_search"
+ "CREATE VIRTUAL TABLE event_search"
" USING fts4 ( event_id, room_id, sender, key, value )"
)
diff --git a/synapse/storage/schema/delta/27/account_data.sql b/synapse/storage/schema/delta/27/account_data.sql
new file mode 100644
index 0000000000..9f25416005
--- /dev/null
+++ b/synapse/storage/schema/delta/27/account_data.sql
@@ -0,0 +1,36 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS account_data(
+ user_id TEXT NOT NULL,
+ account_data_type TEXT NOT NULL, -- The type of the account_data.
+ stream_id BIGINT NOT NULL, -- The version of the account_data.
+ content TEXT NOT NULL, -- The JSON content of the account_data
+ CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type)
+);
+
+
+CREATE TABLE IF NOT EXISTS room_account_data(
+ user_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ account_data_type TEXT NOT NULL, -- The type of the account_data.
+ stream_id BIGINT NOT NULL, -- The version of the account_data.
+ content TEXT NOT NULL, -- The JSON content of the account_data
+ CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type)
+);
+
+
+CREATE INDEX account_data_stream_id ON account_data(user_id, stream_id);
+CREATE INDEX room_account_data_stream_id ON room_account_data(user_id, stream_id);
diff --git a/synapse/storage/schema/delta/27/forgotten_memberships.sql b/synapse/storage/schema/delta/27/forgotten_memberships.sql
new file mode 100644
index 0000000000..beeb8a288b
--- /dev/null
+++ b/synapse/storage/schema/delta/27/forgotten_memberships.sql
@@ -0,0 +1,26 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Keeps track of what rooms users have left and don't want to be able to
+ * access again.
+ *
+ * If all users on this server have left a room, we can delete the room
+ * entirely.
+ *
+ * This column should always contain either 0 or 1.
+ */
+
+ALTER TABLE room_memberships ADD COLUMN forgotten INTEGER DEFAULT 0;
diff --git a/synapse/storage/schema/delta/27/ts.py b/synapse/storage/schema/delta/27/ts.py
new file mode 100644
index 0000000000..8d4a981975
--- /dev/null
+++ b/synapse/storage/schema/delta/27/ts.py
@@ -0,0 +1,57 @@
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.storage.prepare_database import get_statements
+
+import ujson
+
+logger = logging.getLogger(__name__)
+
+
+ALTER_TABLE = (
+ "ALTER TABLE events ADD COLUMN origin_server_ts BIGINT;"
+ "CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering);"
+)
+
+
+def run_upgrade(cur, database_engine, *args, **kwargs):
+ for statement in get_statements(ALTER_TABLE.splitlines()):
+ cur.execute(statement)
+
+ cur.execute("SELECT MIN(stream_ordering) FROM events")
+ rows = cur.fetchall()
+ min_stream_id = rows[0][0]
+
+ cur.execute("SELECT MAX(stream_ordering) FROM events")
+ rows = cur.fetchall()
+ max_stream_id = rows[0][0]
+
+ if min_stream_id is not None and max_stream_id is not None:
+ progress = {
+ "target_min_stream_id_inclusive": min_stream_id,
+ "max_stream_id_exclusive": max_stream_id + 1,
+ "rows_inserted": 0,
+ }
+ progress_json = ujson.dumps(progress)
+
+ sql = (
+ "INSERT into background_updates (update_name, progress_json)"
+ " VALUES (?, ?)"
+ )
+
+ sql = database_engine.convert_param_style(sql)
+
+ cur.execute(sql, ("event_origin_server_ts", progress_json))
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index 380270b009..6cb5e73b6e 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -20,6 +20,7 @@ from synapse.api.errors import SynapseError
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
import logging
+import re
logger = logging.getLogger(__name__)
@@ -84,6 +85,11 @@ class SearchStore(BackgroundUpdateStore):
# skip over it.
continue
+ if not isinstance(value, basestring):
+ # If the event body, name or topic isn't a string
+ # then skip over it
+ continue
+
event_search_rows.append((event_id, room_id, key, value))
if isinstance(self.database_engine, PostgresEngine):
@@ -139,6 +145,9 @@ class SearchStore(BackgroundUpdateStore):
list of dicts
"""
clauses = []
+
+        search_query = _parse_query(self.database_engine, search_term)
+
args = []
# Make sure we don't explode because the person is in too many rooms.
@@ -158,18 +167,36 @@ class SearchStore(BackgroundUpdateStore):
"(%s)" % (" OR ".join(local_clauses),)
)
+ count_args = args
+ count_clauses = clauses
+
if isinstance(self.database_engine, PostgresEngine):
sql = (
- "SELECT ts_rank_cd(vector, query) AS rank, room_id, event_id"
- " FROM plainto_tsquery('english', ?) as query, event_search"
- " WHERE vector @@ query"
+ "SELECT ts_rank_cd(vector, to_tsquery('english', ?)) AS rank,"
+ " room_id, event_id"
+ " FROM event_search"
+ " WHERE vector @@ to_tsquery('english', ?)"
+ )
+ args = [search_query, search_query] + args
+
+ count_sql = (
+ "SELECT room_id, count(*) as count FROM event_search"
+ " WHERE vector @@ to_tsquery('english', ?)"
)
+ count_args = [search_query] + count_args
elif isinstance(self.database_engine, Sqlite3Engine):
sql = (
"SELECT rank(matchinfo(event_search)) as rank, room_id, event_id"
" FROM event_search"
" WHERE value MATCH ?"
)
+ args = [search_query] + args
+
+ count_sql = (
+ "SELECT room_id, count(*) as count FROM event_search"
+ " WHERE value MATCH ?"
+ )
+            count_args = [search_query] + count_args
else:
# This should be unreachable.
raise Exception("Unrecognized database engine")
@@ -177,12 +204,15 @@ class SearchStore(BackgroundUpdateStore):
for clause in clauses:
sql += " AND " + clause
+ for clause in count_clauses:
+ count_sql += " AND " + clause
+
# We add an arbitrary limit here to ensure we don't try to pull the
# entire table from the database.
sql += " ORDER BY rank DESC LIMIT 500"
results = yield self._execute(
- "search_msgs", self.cursor_to_dict, sql, *([search_term] + args)
+ "search_msgs", self.cursor_to_dict, sql, *args
)
results = filter(lambda row: row["room_id"] in room_ids, results)
@@ -194,21 +224,37 @@ class SearchStore(BackgroundUpdateStore):
for ev in events
}
- defer.returnValue([
- {
- "event": event_map[r["event_id"]],
- "rank": r["rank"],
- }
- for r in results
- if r["event_id"] in event_map
- ])
+ highlights = None
+ if isinstance(self.database_engine, PostgresEngine):
+ highlights = yield self._find_highlights_in_postgres(search_query, events)
+
+ count_sql += " GROUP BY room_id"
+
+ count_results = yield self._execute(
+ "search_rooms_count", self.cursor_to_dict, count_sql, *count_args
+ )
+
+ count = sum(row["count"] for row in count_results if row["room_id"] in room_ids)
+
+ defer.returnValue({
+ "results": [
+ {
+ "event": event_map[r["event_id"]],
+ "rank": r["rank"],
+ }
+ for r in results
+ if r["event_id"] in event_map
+ ],
+ "highlights": highlights,
+ "count": count,
+ })
@defer.inlineCallbacks
- def search_room(self, room_id, search_term, keys, limit, pagination_token=None):
+ def search_rooms(self, room_ids, search_term, keys, limit, pagination_token=None):
"""Performs a full text search over events with given keys.
Args:
- room_id (str): The room_id to search in
+            room_ids (list): The room_ids to search in
search_term (str): Search term to search for
keys (list): List of keys to search in, currently supports
"content.body", "content.name", "content.topic"
@@ -218,7 +264,18 @@ class SearchStore(BackgroundUpdateStore):
list of dicts
"""
clauses = []
- args = [search_term, room_id]
+
+        search_query = _parse_query(self.database_engine, search_term)
+
+ args = []
+
+ # Make sure we don't explode because the person is in too many rooms.
+ # We filter the results below regardless.
+ if len(room_ids) < 500:
+ clauses.append(
+ "room_id IN (%s)" % (",".join(["?"] * len(room_ids)),)
+ )
+ args.extend(room_ids)
local_clauses = []
for key in keys:
@@ -229,28 +286,40 @@ class SearchStore(BackgroundUpdateStore):
"(%s)" % (" OR ".join(local_clauses),)
)
+ # take copies of the current args and clauses lists, before adding
+ # pagination clauses to main query.
+ count_args = list(args)
+ count_clauses = list(clauses)
+
if pagination_token:
try:
- topo, stream = pagination_token.split(",")
- topo = int(topo)
+ origin_server_ts, stream = pagination_token.split(",")
+ origin_server_ts = int(origin_server_ts)
stream = int(stream)
except:
raise SynapseError(400, "Invalid pagination token")
clauses.append(
- "(topological_ordering < ?"
- " OR (topological_ordering = ? AND stream_ordering < ?))"
+ "(origin_server_ts < ?"
+ " OR (origin_server_ts = ? AND stream_ordering < ?))"
)
- args.extend([topo, topo, stream])
+ args.extend([origin_server_ts, origin_server_ts, stream])
if isinstance(self.database_engine, PostgresEngine):
sql = (
- "SELECT ts_rank_cd(vector, query) as rank,"
- " topological_ordering, stream_ordering, room_id, event_id"
- " FROM plainto_tsquery('english', ?) as query, event_search"
+ "SELECT ts_rank_cd(vector, to_tsquery('english', ?)) as rank,"
+ " origin_server_ts, stream_ordering, room_id, event_id"
+ " FROM event_search"
" NATURAL JOIN events"
- " WHERE vector @@ query AND room_id = ?"
+ " WHERE vector @@ to_tsquery('english', ?) AND "
+ )
+ args = [search_query, search_query] + args
+
+ count_sql = (
+ "SELECT room_id, count(*) as count FROM event_search"
+ " WHERE vector @@ to_tsquery('english', ?) AND "
)
+ count_args = [search_query] + count_args
elif isinstance(self.database_engine, Sqlite3Engine):
# We use CROSS JOIN here to ensure we use the right indexes.
# https://sqlite.org/optoverview.html#crossjoin
@@ -262,24 +331,31 @@ class SearchStore(BackgroundUpdateStore):
# MATCH unless it uses the full text search index
sql = (
"SELECT rank(matchinfo) as rank, room_id, event_id,"
- " topological_ordering, stream_ordering"
+ " origin_server_ts, stream_ordering"
" FROM (SELECT key, event_id, matchinfo(event_search) as matchinfo"
" FROM event_search"
" WHERE value MATCH ?"
" )"
" CROSS JOIN events USING (event_id)"
- " WHERE room_id = ?"
+ " WHERE "
)
+ args = [search_query] + args
+
+ count_sql = (
+ "SELECT room_id, count(*) as count FROM event_search"
+ " WHERE value MATCH ? AND "
+ )
+            count_args = [search_query] + count_args
else:
# This should be unreachable.
raise Exception("Unrecognized database engine")
- for clause in clauses:
- sql += " AND " + clause
+ sql += " AND ".join(clauses)
+ count_sql += " AND ".join(count_clauses)
# We add an arbitrary limit here to ensure we don't try to pull the
# entire table from the database.
- sql += " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
+ sql += " ORDER BY origin_server_ts DESC, stream_ordering DESC LIMIT ?"
args.append(limit)
@@ -287,6 +363,8 @@ class SearchStore(BackgroundUpdateStore):
"search_rooms", self.cursor_to_dict, sql, *args
)
+ results = filter(lambda row: row["room_id"] in room_ids, results)
+
events = yield self._get_events([r["event_id"] for r in results])
event_map = {
@@ -294,14 +372,119 @@ class SearchStore(BackgroundUpdateStore):
for ev in events
}
- defer.returnValue([
- {
- "event": event_map[r["event_id"]],
- "rank": r["rank"],
- "pagination_token": "%s,%s" % (
- r["topological_ordering"], r["stream_ordering"]
- ),
- }
- for r in results
- if r["event_id"] in event_map
- ])
+ highlights = None
+ if isinstance(self.database_engine, PostgresEngine):
+ highlights = yield self._find_highlights_in_postgres(search_query, events)
+
+ count_sql += " GROUP BY room_id"
+
+ count_results = yield self._execute(
+ "search_rooms_count", self.cursor_to_dict, count_sql, *count_args
+ )
+
+ count = sum(row["count"] for row in count_results if row["room_id"] in room_ids)
+
+ defer.returnValue({
+ "results": [
+ {
+ "event": event_map[r["event_id"]],
+ "rank": r["rank"],
+ "pagination_token": "%s,%s" % (
+ r["origin_server_ts"], r["stream_ordering"]
+ ),
+ }
+ for r in results
+ if r["event_id"] in event_map
+ ],
+ "highlights": highlights,
+ "count": count,
+ })
+
+ def _find_highlights_in_postgres(self, search_query, events):
+ """Given a list of events and a search term, return a list of words
+ that match from the content of the event.
+
+ This is used to give a list of words that clients can match against to
+ highlight the matching parts.
+
+ Args:
+ search_query (str)
+ events (list): A list of events
+
+ Returns:
+            deferred: A set of strings.
+ """
+ def f(txn):
+ highlight_words = set()
+ for event in events:
+ # As a hack we simply join values of all possible keys. This is
+ # fine since we're only using them to find possible highlights.
+ values = []
+ for key in ("body", "name", "topic"):
+ v = event.content.get(key, None)
+ if v:
+ values.append(v)
+
+ if not values:
+ continue
+
+ value = " ".join(values)
+
+ # We need to find some values for StartSel and StopSel that
+ # aren't in the value so that we can pick results out.
+ start_sel = "<"
+ stop_sel = ">"
+
+ while start_sel in value:
+ start_sel += "<"
+ while stop_sel in value:
+ stop_sel += ">"
+
+ query = "SELECT ts_headline(?, to_tsquery('english', ?), %s)" % (
+ _to_postgres_options({
+ "StartSel": start_sel,
+ "StopSel": stop_sel,
+ "MaxFragments": "50",
+ })
+ )
+ txn.execute(query, (value, search_query,))
+ headline, = txn.fetchall()[0]
+
+                # Now we need to pick the possible highlights out of the headline
+ # result.
+ matcher_regex = "%s(.*?)%s" % (
+ re.escape(start_sel),
+ re.escape(stop_sel),
+ )
+
+ res = re.findall(matcher_regex, headline)
+ highlight_words.update([r.lower() for r in res])
+
+ return highlight_words
+
+ return self.runInteraction("_find_highlights", f)
+
+
+def _to_postgres_options(options_dict):
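+    """Render an options dict as the quoted options string that ts_headline
+    expects, e.g. {"StartSel": "<"} becomes "'StartSel=<'".
+    """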
+ return "'%s'" % (
+ ",".join("%s=%s" % (k, v) for k, v in options_dict.items()),
+ )
+
+
+def _parse_query(database_engine, search_term):
+ """Takes a plain unicode string from the user and converts it into a form
+    that can be passed to the database.
+ We use this so that we can add prefix matching, which isn't something
+ that is supported by default.
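+
+    For example, "foo bar" becomes "foo:* & bar:*" on PostgreSQL and
+    "foo* & bar*" on SQLite.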
+ """
+
+ # Pull out the individual words, discarding any non-word characters.
+ results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
+
+ if isinstance(database_engine, PostgresEngine):
+ return " & ".join(result + ":*" for result in results)
+ elif isinstance(database_engine, Sqlite3Engine):
+ return " & ".join(result + "*" for result in results)
+ else:
+ # This should be unreachable.
+ raise Exception("Unrecognized database engine")
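
An illustrative round-trip of the new pagination token format used by
search_rooms: origin_server_ts and stream_ordering are packed as
"ts,stream", matching the ORDER BY origin_server_ts DESC, stream_ordering
DESC ordering above.

    row = {"origin_server_ts": 1445000000000, "stream_ordering": 42}
    token = "%s,%s" % (row["origin_server_ts"], row["stream_ordering"])

    # Parsed back exactly as search_rooms does:
    origin_server_ts, stream = token.split(",")
    assert (int(origin_server_ts), int(stream)) == (1445000000000, 42)
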
diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py
index f6d826cc59..f520f60c6c 100644
--- a/synapse/storage/tags.py
+++ b/synapse/storage/tags.py
@@ -48,8 +48,8 @@ class TagsStore(SQLBaseStore):
Args:
user_id(str): The user to get the tags for.
Returns:
- A deferred dict mapping from room_id strings to lists of tag
- strings.
+ A deferred dict mapping from room_id strings to dicts mapping from
+ tag strings to tag content.
"""
deferred = self._simple_select_list(
|