diff --git a/jenkins.sh b/jenkins.sh
index 8d2ac63c56..0018ca610a 100755
--- a/jenkins.sh
+++ b/jenkins.sh
@@ -42,4 +42,29 @@ export PERL5LIB PERL_MB_OPT PERL_MM_OPT
./install-deps.pl
-./run-tests.pl -O tap --synapse-directory .. --all > results.tap
+: ${PORT_BASE:=8000}
+
+echo >&2 "Running sytest with SQLite3";
+./run-tests.pl -O tap --synapse-directory .. --all --port-base $PORT_BASE > results-sqlite3.tap
+
+RUN_POSTGRES=""
+
+for port in $(($PORT_BASE + 1)) $(($PORT_BASE + 2)); do
+ if psql synapse_jenkins_$port <<< ""; then
+ RUN_POSTGRES=$RUN_POSTGRES:$port
+ cat > localhost-$port/database.yaml << EOF
+name: psycopg2
+args:
+ database: synapse_jenkins_$port
+EOF
+ fi
+done
+
+# Only run the PostgreSQL tests if both databases exist
+if test "$RUN_POSTGRES" = ":$(($PORT_BASE + 1)):$(($PORT_BASE + 2))"; then
+ echo >&2 "Running sytest with PostgreSQL";
+ pip install psycopg2
+ ./run-tests.pl -O tap --synapse-directory .. --all --port-base $PORT_BASE > results-postgresql.tap
+else
+ echo >&2 "Skipping running sytest with PostgreSQL, $RUN_POSTGRES"
+fi
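
The PostgreSQL run above is gated on both synapse_jenkins_<port> databases
already existing, so they have to be created before the build runs. A
minimal sketch of the same probe in Python, assuming psycopg2 and the same
passwordless local auth the psql check relies on:

    import psycopg2

    def jenkins_databases_exist(port_base=8000):
        # Probe the same databases the shell loop above checks with psql.
        for port in (port_base + 1, port_base + 2):
            try:
                psycopg2.connect(dbname="synapse_jenkins_%d" % port).close()
            except psycopg2.OperationalError:
                return False
        return True
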
diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py
index f0d0cd8a3f..f5d4cc37fd 100755
--- a/scripts-dev/definitions.py
+++ b/scripts-dev/definitions.py
@@ -79,16 +79,16 @@ def defined_names(prefix, defs, names):
defined_names(prefix + name + ".", funcs, names)
-def used_names(prefix, defs, names):
+def used_names(prefix, item, defs, names):
for name, funcs in defs.get('def', {}).items():
- used_names(prefix + name + ".", funcs, names)
+ used_names(prefix + name + ".", name, funcs, names)
for name, funcs in defs.get('class', {}).items():
- used_names(prefix + name + ".", funcs, names)
+ used_names(prefix + name + ".", name, funcs, names)
for used in defs.get('uses', ()):
if used in names:
- names[used].setdefault('used', []).append(prefix.rstrip('.'))
+ names[used].setdefault('used', {}).setdefault(item, []).append(prefix.rstrip('.'))
if __name__ == '__main__':
@@ -109,6 +109,14 @@ if __name__ == '__main__':
"directories", nargs='+', metavar="DIR",
help="Directories to search for definitions"
)
+ parser.add_argument(
+ "--referrers", default=0, type=int,
+ help="Include referrers up to the given depth"
+ )
+ parser.add_argument(
+ "--format", default="yaml",
+ help="Output format, one of 'yaml' or 'dot'"
+ )
args = parser.parse_args()
definitions = {}
@@ -124,7 +132,7 @@ if __name__ == '__main__':
defined_names(filepath + ":", defs, names)
for filepath, defs in definitions.items():
- used_names(filepath + ":", defs, names)
+ used_names(filepath + ":", None, defs, names)
patterns = [re.compile(pattern) for pattern in args.pattern or ()]
ignore = [re.compile(pattern) for pattern in args.ignore or ()]
@@ -139,4 +147,29 @@ if __name__ == '__main__':
continue
result[name] = definition
- yaml.dump(result, sys.stdout, default_flow_style=False)
+ referrer_depth = args.referrers
+ referrers = set()
+ while referrer_depth:
+ referrer_depth -= 1
+ for entry in result.values():
+ for used_by in entry.get("used", ()):
+ referrers.add(used_by)
+ for name, definition in names.items():
+ if name not in referrers:
+ continue
+ if ignore and any(pattern.match(name) for pattern in ignore):
+ continue
+ result[name] = definition
+
+ if args.format == 'yaml':
+ yaml.dump(result, sys.stdout, default_flow_style=False)
+ elif args.format == 'dot':
+ print "digraph {"
+ for name, entry in result.items():
+ print '"%s"' % name
+ for used_by in entry.get("used", ()):
+ if used_by in result:
+ print '"%s" -> "%s"' % (used_by, name)
+ print "}"
+ else:
+ raise ValueError("Unknown format %r" % (args.format))
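
With the used_names() change above, a definition's "used" field becomes a
dict keyed by the immediately enclosing definition (or None for module-level
uses) instead of a flat list of call sites. A hypothetical entry, purely to
illustrate the assumed shape:

    # Hypothetical data, not from a real run: "used" now maps the bare name
    # of the referring def/class to the fully-qualified referrer prefixes.
    entry = {
        "used": {
            "search": ["synapse/handlers/search.py:SearchHandler.search"],
            None: ["synapse/storage/search.py:"],  # used at module level
        },
    }

This is also what the dot output walks: each "used" key that itself made it
into the result set becomes a "referrer -> name" edge.
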
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 50688e51a8..df6390cf05 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -131,6 +131,17 @@ class SearchHandler(BaseHandler):
if batch_group == "room_id":
room_ids.intersection_update({batch_group_key})
+ if not room_ids:
+ defer.returnValue({
+ "search_categories": {
+ "room_events": {
+ "results": {},
+ "count": 0,
+ "highlights": [],
+ }
+ }
+ })
+
rank_map = {} # event_id -> rank of event
allowed_events = []
room_groups = {} # Holds result of grouping by room, if applicable
@@ -139,11 +150,18 @@ class SearchHandler(BaseHandler):
# Holds the next_batch for the entire result set if one of those exists
global_next_batch = None
+ highlights = set()
+
if order_by == "rank":
- results = yield self.store.search_msgs(
+ search_result = yield self.store.search_msgs(
room_ids, search_term, keys
)
+ if search_result["highlights"]:
+ highlights.update(search_result["highlights"])
+
+ results = search_result["results"]
+
results_map = {r["event"].event_id: r for r in results}
rank_map.update({r["event"].event_id: r["rank"] for r in results})
@@ -171,80 +189,67 @@ class SearchHandler(BaseHandler):
s["results"].append(e.event_id)
elif order_by == "recent":
- # In this case we specifically loop through each room as the given
- # limit applies to each room, rather than a global list.
- # This is not necessarilly a good idea.
- for room_id in room_ids:
- room_events = []
- if batch_group == "room_id" and batch_group_key == room_id:
- pagination_token = batch_token
- else:
- pagination_token = None
- i = 0
-
- # We keep looping and we keep filtering until we reach the limit
- # or we run out of things.
- # But only go around 5 times since otherwise synapse will be sad.
- while len(room_events) < search_filter.limit() and i < 5:
- i += 1
- results = yield self.store.search_room(
- room_id, search_term, keys, search_filter.limit() * 2,
- pagination_token=pagination_token,
- )
+ room_events = []
+ i = 0
+
+ pagination_token = batch_token
+
+ # Keep looping and filtering until we reach the limit or run out of
+ # results, but cap it at 5 rounds to bound the number of database
+ # queries a single search request can make.
+ while len(room_events) < search_filter.limit() and i < 5:
+ i += 1
+ search_result = yield self.store.search_rooms(
+ room_ids, search_term, keys, search_filter.limit() * 2,
+ pagination_token=pagination_token,
+ )
- results_map = {r["event"].event_id: r for r in results}
+ if search_result["highlights"]:
+ highlights.update(search_result["highlights"])
- rank_map.update({r["event"].event_id: r["rank"] for r in results})
+ results = search_result["results"]
- filtered_events = search_filter.filter([
- r["event"] for r in results
- ])
+ results_map = {r["event"].event_id: r for r in results}
- events = yield self._filter_events_for_client(
- user.to_string(), filtered_events
- )
+ rank_map.update({r["event"].event_id: r["rank"] for r in results})
- room_events.extend(events)
- room_events = room_events[:search_filter.limit()]
+ filtered_events = search_filter.filter([
+ r["event"] for r in results
+ ])
- if len(results) < search_filter.limit() * 2:
- pagination_token = None
- break
- else:
- pagination_token = results[-1]["pagination_token"]
-
- if room_events:
- res = results_map[room_events[-1].event_id]
- pagination_token = res["pagination_token"]
-
- group = room_groups.setdefault(room_id, {})
- if pagination_token:
- next_batch = encode_base64("%s\n%s\n%s" % (
- "room_id", room_id, pagination_token
- ))
- group["next_batch"] = next_batch
-
- if batch_token:
- global_next_batch = next_batch
-
- group["results"] = [e.event_id for e in room_events]
- group["order"] = max(
- e.origin_server_ts/1000 for e in room_events
- if hasattr(e, "origin_server_ts")
- )
-
- allowed_events.extend(room_events)
+ events = yield self._filter_events_for_client(
+ user.to_string(), filtered_events
+ )
- # Normalize the group orders
- if room_groups:
- if len(room_groups) > 1:
- mx = max(g["order"] for g in room_groups.values())
- mn = min(g["order"] for g in room_groups.values())
+ room_events.extend(events)
+ room_events = room_events[:search_filter.limit()]
- for g in room_groups.values():
- g["order"] = (g["order"] - mn) * 1.0 / (mx - mn)
+ if len(results) < search_filter.limit() * 2:
+ pagination_token = None
+ break
else:
- room_groups.values()[0]["order"] = 1
+ pagination_token = results[-1]["pagination_token"]
+
+ for event in room_events:
+ group = room_groups.setdefault(event.room_id, {
+ "results": [],
+ })
+ group["results"].append(event.event_id)
+
+ if room_events and len(room_events) >= search_filter.limit():
+ last_event_id = room_events[-1].event_id
+ pagination_token = results_map[last_event_id]["pagination_token"]
+
+ global_next_batch = encode_base64("%s\n%s\n%s" % (
+ "all", "", pagination_token
+ ))
+
+ for room_id, group in room_groups.items():
+ group["next_batch"] = encode_base64("%s\n%s\n%s" % (
+ "room_id", room_id, pagination_token
+ ))
+
+ allowed_events.extend(room_events)
else:
# We should never get here due to the guard earlier.
@@ -347,7 +352,8 @@ class SearchHandler(BaseHandler):
rooms_cat_res = {
"results": results,
- "count": len(results)
+ "count": len(results),
+ "highlights": list(highlights),
}
if state_results:
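
The next_batch tokens built above pack three newline-separated fields
(group, group key, pagination token) through encode_base64. A minimal
sketch of the decoding direction, assuming encode_base64 emits unpadded
base64 as elsewhere in synapse (so the padding has to be restored first):

    import base64

    def decode_batch(batch_token):
        # Restore the padding that the unpadded encoder strips.
        padded = batch_token + "=" * (-len(batch_token) % 4)
        fields = base64.b64decode(padded).decode("utf-8").split("\n")
        group, group_key, pagination_token = fields
        return group, group_key, pagination_token

    # A global token from the "recent" branch decodes to
    # ("all", "", "<origin_server_ts>,<stream_ordering>").
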
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 5d35ca90b9..7088f2709b 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -51,6 +51,14 @@ EVENT_QUEUE_TIMEOUT_S = 0.1 # Timeout when waiting for requests for events
class EventsStore(SQLBaseStore):
+ EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
+
+ def __init__(self, hs):
+ super(EventsStore, self).__init__(hs)
+ self.register_background_update_handler(
+ self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts
+ )
+
@defer.inlineCallbacks
def persist_events(self, events_and_contexts, backfilled=False,
is_new_state=True):
@@ -365,6 +373,7 @@ class EventsStore(SQLBaseStore):
"processed": True,
"outlier": event.internal_metadata.is_outlier(),
"content": encode_json(event.content).decode("UTF-8"),
+ "origin_server_ts": int(event.origin_server_ts),
}
for event, _ in events_and_contexts
],
@@ -964,3 +973,71 @@ class EventsStore(SQLBaseStore):
ret = yield self.runInteraction("count_messages", _count_messages)
defer.returnValue(ret)
+
+ @defer.inlineCallbacks
+ def _background_reindex_origin_server_ts(self, progress, batch_size):
+ target_min_stream_id = progress["target_min_stream_id_inclusive"]
+ max_stream_id = progress["max_stream_id_exclusive"]
+ rows_inserted = progress.get("rows_inserted", 0)
+
+ INSERT_CLUMP_SIZE = 1000
+
+ def reindex_ts_txn(txn):
+ sql = (
+ "SELECT stream_ordering, event_id FROM events"
+ " WHERE ? <= stream_ordering AND stream_ordering < ?"
+ " ORDER BY stream_ordering DESC"
+ " LIMIT ?"
+ )
+
+ txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
+
+ rows = txn.fetchall()
+ if not rows:
+ return 0
+
+ min_stream_id = rows[-1][0]
+ event_ids = [row[1] for row in rows]
+
+ events = self._get_events_txn(txn, event_ids)
+
+ rows = []
+ for event in events:
+ try:
+ event_id = event.event_id
+ origin_server_ts = event.origin_server_ts
+ except (KeyError, AttributeError):
+ # If the event is missing a necessary field then
+ # skip over it.
+ continue
+
+ rows.append((origin_server_ts, event_id))
+
+ sql = (
+ "UPDATE events SET origin_server_ts = ? WHERE event_id = ?"
+ )
+
+ for index in range(0, len(rows), INSERT_CLUMP_SIZE):
+ clump = rows[index:index + INSERT_CLUMP_SIZE]
+ txn.executemany(sql, clump)
+
+ progress = {
+ "target_min_stream_id_inclusive": target_min_stream_id,
+ "max_stream_id_exclusive": min_stream_id,
+ "rows_inserted": rows_inserted + len(rows)
+ }
+
+ self._background_update_progress_txn(
+ txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress
+ )
+
+ # Return the number of events scanned (not just updated) so the
+ # update isn't ended early if a whole batch is skipped.
+ return len(event_ids)
+
+ result = yield self.runInteraction(
+ self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_ts_txn
+ )
+
+ if not result:
+ yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME)
+
+ defer.returnValue(result)
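
The handler above follows the background-update contract: it is called
repeatedly with (progress, batch_size), returns how many rows it processed,
and the update is ended once it returns 0. A toy illustration (not synapse
code) of how the scan walks the stream backwards by shrinking
max_stream_id_exclusive each round:

    def walk_batches(target_min, max_exclusive, batch_size):
        # Yield the stream orderings each round would touch, newest first,
        # assuming (unrealistically) that the orderings are dense.
        while max_exclusive > target_min:
            low = max(target_min, max_exclusive - batch_size)
            rows = list(range(max_exclusive - 1, low - 1, -1))
            yield rows
            max_exclusive = rows[-1]  # the new exclusive upper bound
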
diff --git a/synapse/storage/schema/delta/26/ts.py b/synapse/storage/schema/delta/26/ts.py
new file mode 100644
index 0000000000..8d4a981975
--- /dev/null
+++ b/synapse/storage/schema/delta/26/ts.py
@@ -0,0 +1,57 @@
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.storage.prepare_database import get_statements
+
+import ujson
+
+logger = logging.getLogger(__name__)
+
+
+ALTER_TABLE = (
+ "ALTER TABLE events ADD COLUMN origin_server_ts BIGINT;"
+ "CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering);"
+)
+
+
+def run_upgrade(cur, database_engine, *args, **kwargs):
+ for statement in get_statements(ALTER_TABLE.splitlines()):
+ cur.execute(statement)
+
+ cur.execute("SELECT MIN(stream_ordering) FROM events")
+ rows = cur.fetchall()
+ min_stream_id = rows[0][0]
+
+ cur.execute("SELECT MAX(stream_ordering) FROM events")
+ rows = cur.fetchall()
+ max_stream_id = rows[0][0]
+
+ if min_stream_id is not None and max_stream_id is not None:
+ progress = {
+ "target_min_stream_id_inclusive": min_stream_id,
+ "max_stream_id_exclusive": max_stream_id + 1,
+ "rows_inserted": 0,
+ }
+ progress_json = ujson.dumps(progress)
+
+ sql = (
+ "INSERT into background_updates (update_name, progress_json)"
+ " VALUES (?, ?)"
+ )
+
+ sql = database_engine.convert_param_style(sql)
+
+ cur.execute(sql, ("event_origin_server_ts", progress_json))
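
One subtlety in the delta above: the two adjacent string literals in
ALTER_TABLE concatenate with no newline, so ALTER_TABLE.splitlines() yields
a single line containing both statements. That still works on the (assumed)
reading that get_statements splits each line on ';':

    # Assuming get_statements splits on ';' within a line, both statements
    # come back out despite the missing newline:
    list(get_statements(ALTER_TABLE.splitlines()))
    # -> ["ALTER TABLE events ADD COLUMN origin_server_ts BIGINT",
    #     "CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering)"]
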
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index 380270b009..20a62d07ff 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -20,6 +20,7 @@ from synapse.api.errors import SynapseError
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
import logging
+import re
logger = logging.getLogger(__name__)
@@ -194,21 +195,28 @@ class SearchStore(BackgroundUpdateStore):
for ev in events
}
- defer.returnValue([
- {
- "event": event_map[r["event_id"]],
- "rank": r["rank"],
- }
- for r in results
- if r["event_id"] in event_map
- ])
+ highlights = None
+ if isinstance(self.database_engine, PostgresEngine):
+ highlights = yield self._find_highlights_in_postgres(search_term, events)
+
+ defer.returnValue({
+ "results": [
+ {
+ "event": event_map[r["event_id"]],
+ "rank": r["rank"],
+ }
+ for r in results
+ if r["event_id"] in event_map
+ ],
+ "highlights": highlights,
+ })
@defer.inlineCallbacks
- def search_room(self, room_id, search_term, keys, limit, pagination_token=None):
+ def search_rooms(self, room_ids, search_term, keys, limit, pagination_token=None):
"""Performs a full text search over events with given keys.
Args:
- room_id (str): The room_id to search in
+ room_ids (list): The room ids to search in
search_term (str): Search term to search for
keys (list): List of keys to search in, currently supports
"content.body", "content.name", "content.topic"
@@ -218,7 +226,15 @@ class SearchStore(BackgroundUpdateStore):
- list of dicts
+ a dict with "results" and "highlights" keys
"""
clauses = []
- args = [search_term, room_id]
+ args = [search_term]
+
+ # Don't pass the room ids as bound parameters if there are too many
+ # of them (SQLite only allows 999 variables per query); the results
+ # are filtered against room_ids below regardless.
+ if len(room_ids) < 500:
+ clauses.append(
+ "room_id IN (%s)" % (",".join(["?"] * len(room_ids)),)
+ )
+ args.extend(room_ids)
local_clauses = []
for key in keys:
@@ -231,25 +247,25 @@ class SearchStore(BackgroundUpdateStore):
if pagination_token:
try:
- topo, stream = pagination_token.split(",")
- topo = int(topo)
+ origin_server_ts, stream = pagination_token.split(",")
+ origin_server_ts = int(origin_server_ts)
stream = int(stream)
except:
raise SynapseError(400, "Invalid pagination token")
clauses.append(
- "(topological_ordering < ?"
- " OR (topological_ordering = ? AND stream_ordering < ?))"
+ "(origin_server_ts < ?"
+ " OR (origin_server_ts = ? AND stream_ordering < ?))"
)
- args.extend([topo, topo, stream])
+ args.extend([origin_server_ts, origin_server_ts, stream])
if isinstance(self.database_engine, PostgresEngine):
sql = (
"SELECT ts_rank_cd(vector, query) as rank,"
- " topological_ordering, stream_ordering, room_id, event_id"
+ " origin_server_ts, stream_ordering, room_id, event_id"
" FROM plainto_tsquery('english', ?) as query, event_search"
" NATURAL JOIN events"
- " WHERE vector @@ query AND room_id = ?"
+ " WHERE vector @@ query AND "
)
elif isinstance(self.database_engine, Sqlite3Engine):
# We use CROSS JOIN here to ensure we use the right indexes.
@@ -262,24 +278,23 @@ class SearchStore(BackgroundUpdateStore):
# MATCH unless it uses the full text search index
sql = (
"SELECT rank(matchinfo) as rank, room_id, event_id,"
- " topological_ordering, stream_ordering"
+ " origin_server_ts, stream_ordering"
" FROM (SELECT key, event_id, matchinfo(event_search) as matchinfo"
" FROM event_search"
" WHERE value MATCH ?"
" )"
" CROSS JOIN events USING (event_id)"
- " WHERE room_id = ?"
+ " WHERE "
)
else:
# This should be unreachable.
raise Exception("Unrecognized database engine")
- for clause in clauses:
- sql += " AND " + clause
+ sql += " AND ".join(clauses)
# We add an arbitrary limit here to ensure we don't try to pull the
# entire table from the database.
- sql += " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
+ sql += " ORDER BY origin_server_ts DESC, stream_ordering DESC LIMIT ?"
args.append(limit)
@@ -287,6 +302,8 @@ class SearchStore(BackgroundUpdateStore):
"search_rooms", self.cursor_to_dict, sql, *args
)
+ results = filter(lambda row: row["room_id"] in room_ids, results)
+
events = yield self._get_events([r["event_id"] for r in results])
event_map = {
@@ -294,14 +311,91 @@ class SearchStore(BackgroundUpdateStore):
for ev in events
}
- defer.returnValue([
- {
- "event": event_map[r["event_id"]],
- "rank": r["rank"],
- "pagination_token": "%s,%s" % (
- r["topological_ordering"], r["stream_ordering"]
- ),
- }
- for r in results
- if r["event_id"] in event_map
- ])
+ highlights = None
+ if isinstance(self.database_engine, PostgresEngine):
+ highlights = yield self._find_highlights_in_postgres(search_term, events)
+
+ defer.returnValue({
+ "results": [
+ {
+ "event": event_map[r["event_id"]],
+ "rank": r["rank"],
+ "pagination_token": "%s,%s" % (
+ r["origin_server_ts"], r["stream_ordering"]
+ ),
+ }
+ for r in results
+ if r["event_id"] in event_map
+ ],
+ "highlights": highlights,
+ })
+
+ def _find_highlights_in_postgres(self, search_term, events):
+ """Given a list of events and a search term, return a list of words
+ that match from the content of the event.
+
+ This is used to give a list of words that clients can match against to
+ highlight the matching parts.
+
+ Args:
+ search_term (str)
+ events (list): A list of events
+
+ Returns:
+ Deferred: A set of strings.
+ """
+ def f(txn):
+ highlight_words = set()
+ for event in events:
+ # As a hack we simply join values of all possible keys. This is
+ # fine since we're only using them to find possible highlights.
+ values = []
+ for key in ("body", "name", "topic"):
+ v = event.content.get(key, None)
+ if v:
+ values.append(v)
+
+ if not values:
+ continue
+
+ value = " ".join(values)
+
+ # We need to find some values for StartSel and StopSel that
+ # aren't in the value so that we can pick results out.
+ start_sel = "<"
+ stop_sel = ">"
+
+ while start_sel in value:
+ start_sel += "<"
+ while stop_sel in value:
+ stop_sel += ">"
+
+ query = "SELECT ts_headline(?, plainto_tsquery('english', ?), %s)" % (
+ _to_postgres_options({
+ "StartSel": start_sel,
+ "StopSel": stop_sel,
+ "MaxFragments": "50",
+ })
+ )
+ txn.execute(query, (value, search_term,))
+ headline, = txn.fetchall()[0]
+
+ # Now we need to pick the possible highlights out of the headline
+ # result.
+ matcher_regex = "%s(.*?)%s" % (
+ re.escape(start_sel),
+ re.escape(stop_sel),
+ )
+
+ res = re.findall(matcher_regex, headline)
+ highlight_words.update([r.lower() for r in res])
+
+ return highlight_words
+
+ return self.runInteraction("_find_highlights", f)
+
+
+def _to_postgres_options(options_dict):
+ return "'%s'" % (
+ ",".join("%s=%s" % (k, v) for k, v in options_dict.items()),
+ )
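
For reference, _to_postgres_options() renders the ts_headline options as a
single quoted SQL literal, and the highlights are then pulled back out of
the headline with a non-greedy regex. A quick worked example, assuming the
usual single-character delimiters chosen above:

    import re

    _to_postgres_options({"MaxFragments": "50"})
    # -> "'MaxFragments=50'" (the single quotes are part of the string)

    headline = "for the <search> term in the <message> body"
    re.findall("%s(.*?)%s" % (re.escape("<"), re.escape(">")), headline)
    # -> ['search', 'message']
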
|