author     Erik Johnston <erik@matrix.org>  2015-05-12 13:10:42 +0100
committer  Erik Johnston <erik@matrix.org>  2015-05-12 13:10:42 +0100
commit     da6a7bbdde73c5182a08a8f6253dc77761d9fc5f (patch)
tree       de515e4104613e4abbe0f523904f055078fc48af
parent     Fix up leak. Add warnings. (diff)
parent     Merge pull request #148 from matrix-org/bugs/SYN-377 (diff)
download   synapse-da6a7bbdde73c5182a08a8f6253dc77761d9fc5f.tar.xz
Merge branch 'develop' of github.com:matrix-org/synapse into erikj/logging_context
-rw-r--r--  AUTHORS.rst | 3
-rw-r--r--  CHANGES.rst | 2
-rw-r--r--  UPGRADE.rst | 2
-rw-r--r--  docs/application_services.rst | 2
-rw-r--r--  docs/postgres.rst | 6
-rw-r--r--  scripts-dev/convert_server_keys.py | 113
-rwxr-xr-x  scripts/port_from_sqlite_to_postgres.py | 9
-rw-r--r--  synapse/__init__.py | 2
-rw-r--r--  synapse/storage/__init__.py | 2
-rw-r--r--  synapse/storage/_base.py | 1
-rw-r--r--  synapse/storage/events.py | 38
-rw-r--r--  synapse/storage/push_rule.py | 112
-rw-r--r--  synapse/storage/schema/delta/18/server_keys_bigger_ints.sql | 32
-rw-r--r--  synapse/storage/util/id_generators.py | 12
14 files changed, 244 insertions(+), 92 deletions(-)
diff --git a/AUTHORS.rst b/AUTHORS.rst
index 8396e535e8..3a457cd9fc 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -35,3 +35,6 @@ Turned to Dust <dwinslow86 at gmail.com>
 
 Brabo <brabo at riseup.net>
  * Installation instruction fixes
+
+Ivan Shapovalov <intelfx100 at gmail.com>
+ * contrib/systemd: a sample systemd unit file and a logger configuration
diff --git a/CHANGES.rst b/CHANGES.rst
index f0bb973dcf..65970a89c7 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -10,6 +10,8 @@ General:
 * Fix race in caches that occasionally caused some presence updates to be
   dropped - SYN-369.
 * Check server name has not changed on restart.
+* Add a sample systemd unit file and a logger configuration in
+  contrib/systemd. Contributed by Ivan Shapovalov.
 
 Federation:
 
diff --git a/UPGRADE.rst b/UPGRADE.rst
index ab327a8136..d98460f643 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -1,4 +1,4 @@
-Upgrading to v0.x.x
+Upgrading to v0.9.0
 ===================
 
 Application services have had a breaking API change in this version.
diff --git a/docs/application_services.rst b/docs/application_services.rst
index a57bae6194..7e87ac9ad6 100644
--- a/docs/application_services.rst
+++ b/docs/application_services.rst
@@ -20,7 +20,7 @@ The format of the AS configuration file is as follows:
 
     url: <base url of AS>
     as_token: <token AS will add to requests to HS>
-    hs_token: <token HS will ad to requests to AS>
+    hs_token: <token HS will add to requests to AS>
     sender_localpart: <localpart of AS user>
     namespaces:
       users:  # List of users we're interested in
diff --git a/docs/postgres.rst b/docs/postgres.rst
index 2dcc3caf9e..19d8391115 100644
--- a/docs/postgres.rst
+++ b/docs/postgres.rst
@@ -82,13 +82,13 @@ complete, restart synapse.  For instance::
     cp homeserver.db homeserver.db.snapshot
     ./synctl start
 
-Assuming your database config file (as described in the section *Synapse
-config*) is named ``database_config.yaml`` and the SQLite snapshot is at
+Assuming your new config file (as described in the section *Synapse config*)
+is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at
 ``homeserver.db.snapshot`` then simply run::
 
     python scripts/port_from_sqlite_to_postgres.py \
         --sqlite-database homeserver.db.snapshot \
-        --postgres-config database_config.yaml
+        --postgres-config homeserver-postgres.yaml
 
 The flag ``--curses`` displays a coloured curses progress UI.
 
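For reference, the port script below expects the loaded database config to carry at least a ``name`` and an ``args`` mapping. The exact keys depend on your Postgres setup, but (as an assumed example, not taken from this commit) the ``database`` section of ``homeserver-postgres.yaml`` would deserialize to something like::

    # What yaml.safe_load() of the database section might return (assumed
    # example; adjust user/password/database/host to your own setup).
    database_config = {
        "name": "psycopg2",
        "args": {
            "user": "synapse_user",
            "password": "secret",
            "database": "synapse",
            "host": "localhost",
            # the homeserver config may also carry connection-pool keys
            # such as cp_min / cp_max, which some scripts strip out
        },
    }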
diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py
new file mode 100644
index 0000000000..024ddcdbd0
--- /dev/null
+++ b/scripts-dev/convert_server_keys.py
@@ -0,0 +1,113 @@
+import psycopg2
+import yaml
+import sys
+import json
+import time
+import hashlib
+from syutil.base64util import encode_base64
+from syutil.crypto.signing_key import read_signing_keys
+from syutil.crypto.jsonsign import sign_json
+from syutil.jsonutil import encode_canonical_json
+
+
+def select_v1_keys(connection):
+    cursor = connection.cursor()
+    cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys")
+    rows = cursor.fetchall()
+    cursor.close()
+    results = {}
+    for server_name, key_id, verify_key in rows:
+        results.setdefault(server_name, {})[key_id] = encode_base64(verify_key)
+    return results
+
+
+def select_v1_certs(connection):
+    cursor = connection.cursor()
+    cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates")
+    rows = cursor.fetchall()
+    cursor.close()
+    results = {}
+    for server_name, tls_certificate in rows:
+        results[server_name] = tls_certificate
+    return results
+
+
+def select_v2_json(connection):
+    cursor = connection.cursor()
+    cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json")
+    rows = cursor.fetchall()
+    cursor.close()
+    results = {}
+    for server_name, key_id, key_json in rows:
+        results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8"))
+    return results
+
+
+def convert_v1_to_v2(server_name, valid_until, keys, certificate):
+    return {
+        "old_verify_keys": {},
+        "server_name": server_name,
+        "verify_keys": keys,
+        "valid_until_ts": valid_until,
+        "tls_fingerprints": [fingerprint(certificate)],
+    }
+
+
+def fingerprint(certificate):
+    finger = hashlib.sha256(certificate)
+    return {"sha256": encode_base64(finger.digest())}
+
+
+def rows_v2(server, json):
+    valid_until = json["valid_until_ts"]
+    key_json = encode_canonical_json(json)
+    for key_id in json["verify_keys"]:
+        yield (server, key_id, "-", valid_until, valid_until, buffer(key_json))
+
+
+def main():
+    config = yaml.load(open(sys.argv[1]))
+    valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24
+
+    server_name = config["server_name"]
+    signing_key = read_signing_keys(open(config["signing_key_path"]))[0]
+
+    database = config["database"]
+    assert database["name"] == "psycopg2", "Can only convert for postgresql"
+    args = database["args"]
+    args.pop("cp_max")
+    args.pop("cp_min")
+    connection = psycopg2.connect(**args)
+    keys = select_v1_keys(connection)
+    certificates = select_v1_certs(connection)
+    json = select_v2_json(connection)
+
+    result = {}
+    for server in keys:
+        if not server in json:
+            v2_json = convert_v1_to_v2(
+                server, valid_until, keys[server], certificates[server]
+            )
+            v2_json = sign_json(v2_json, server_name, signing_key)
+            result[server] = v2_json
+
+    yaml.safe_dump(result, sys.stdout, default_flow_style=False)
+
+    rows = list(
+        row for server, json in result.items()
+        for row in rows_v2(server, json)
+    )
+
+    cursor = connection.cursor()
+    cursor.executemany(
+        "INSERT INTO server_keys_json ("
+        " server_name, key_id, from_server,"
+        " ts_added_ms, ts_valid_until_ms, key_json"
+        ") VALUES (%s, %s, %s, %s, %s, %s)",
+        rows
+    )
+    connection.commit()
+
+
+if __name__ == '__main__':
+    main()
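A note on the ``valid_until`` computation in ``main()`` above: integer-dividing the epoch time by 86400 and multiplying back up truncates to the start of the current UTC day, and the extra factor of 1000 converts seconds to the milliseconds that ``ts_valid_until_ms`` stores. A minimal standalone sketch of the same arithmetic::

    import time

    SECONDS_PER_DAY = 24 * 3600

    now = int(time.time())                                   # e.g. 1431432642 (2015-05-12 12:10:42 UTC)
    day_start = (now // SECONDS_PER_DAY) * SECONDS_PER_DAY   # e.g. 1431388800 (start of that UTC day)
    valid_until_ms = day_start * 1000                        # same value in milliseconds

    # Equivalent to the script's int(time.time() / (3600 * 24)) * 1000 * 3600 * 24
    assert valid_until_ms == int(now / SECONDS_PER_DAY) * 1000 * SECONDS_PER_DAY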
diff --git a/scripts/port_from_sqlite_to_postgres.py b/scripts/port_from_sqlite_to_postgres.py
index f98342db51..e7ed4c309b 100755
--- a/scripts/port_from_sqlite_to_postgres.py
+++ b/scripts/port_from_sqlite_to_postgres.py
@@ -106,7 +106,7 @@ class Store(object):
                     try:
                         txn = conn.cursor()
                         return func(
-                            LoggingTransaction(txn, desc, self.database_engine),
+                            LoggingTransaction(txn, desc, self.database_engine, []),
                             *args, **kwargs
                         )
                     except self.database_engine.module.DatabaseError as e:
@@ -378,9 +378,7 @@ class Porter(object):
 
         for i, row in enumerate(rows):
             rows[i] = tuple(
-                self.postgres_store.database_engine.encode_parameter(
-                    conv(j, col)
-                )
+                conv(j, col)
                 for j, col in enumerate(row)
                 if j > 0
             )
@@ -725,6 +723,9 @@ if __name__ == "__main__":
 
     postgres_config = yaml.safe_load(args.postgres_config)
 
+    if "database" in postgres_config:
+        postgres_config = postgres_config["database"]
+
     if "name" not in postgres_config:
         sys.stderr.write("Malformed database config: no 'name'")
         sys.exit(2)
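The last hunk above means ``--postgres-config`` can now be pointed either at a bare database config or at a full homeserver config that contains a ``database`` section. A standalone sketch of the same resolution logic (hypothetical helper, not part of the script)::

    import sys
    import yaml

    def load_database_config(path):
        with open(path) as f:
            config = yaml.safe_load(f)
        # Accept a full homeserver config by unwrapping its "database" section.
        if "database" in config:
            config = config["database"]
        if "name" not in config:
            sys.stderr.write("Malformed database config: no 'name'\n")
            sys.exit(2)
        return config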
diff --git a/synapse/__init__.py b/synapse/__init__.py
index c89f444f4e..041e2151b0 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
 
-__version__ = "0.9.0"
+__version__ = "0.9.0-r4"
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 0cc14fb692..7cb91a0be9 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -51,7 +51,7 @@ logger = logging.getLogger(__name__)
 
 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 17
+SCHEMA_VERSION = 18
 
 dir_path = os.path.abspath(os.path.dirname(__file__))
 
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index b0020f51db..c9fe5a3555 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -308,6 +308,7 @@ class SQLBaseStore(object):
         self._state_groups_id_gen = IdGenerator("state_groups", "id", self)
         self._access_tokens_id_gen = IdGenerator("access_tokens", "id", self)
         self._pushers_id_gen = IdGenerator("pushers", "id", self)
+        self._push_rule_id_gen = IdGenerator("push_rules", "id", self)
 
     def start_profiling(self):
         self._previous_loop_ts = self._clock.time_msec()
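The new ``_push_rule_id_gen`` is what lets the push rule inserts later in this diff assign their own ``id`` inside the transaction. A rough sketch of what a per-table id generator of this shape might look like is below; it is illustrative only, and the real ``IdGenerator`` in ``synapse/storage/util/id_generators.py`` may differ::

    import threading

    class SimpleIdGenerator(object):
        """Illustrative stand-in for IdGenerator("push_rules", "id", store)."""

        def __init__(self, table, column):
            self.table = table
            self.column = column
            self._lock = threading.Lock()
            self._current = None

        def get_next_txn(self, txn):
            with self._lock:
                if self._current is None:
                    # Seed from the highest id already in the table.
                    txn.execute("SELECT MAX(%s) FROM %s" % (self.column, self.table))
                    row = txn.fetchone()
                    self._current = row[0] or 0
                self._current += 1
                return self._current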
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 38395c66ab..626a5eaf6e 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -23,6 +23,7 @@ from synapse.crypto.event_signing import compute_event_reference_hash
 
 from syutil.base64util import decode_base64
 from syutil.jsonutil import encode_canonical_json
+from contextlib import contextmanager
 
 import logging
 
@@ -41,17 +42,25 @@ class EventsStore(SQLBaseStore):
             self.min_token -= 1
             stream_ordering = self.min_token
 
+        if stream_ordering is None:
+            stream_ordering_manager = yield self._stream_id_gen.get_next(self)
+        else:
+            @contextmanager
+            def stream_ordering_manager():
+                yield stream_ordering
+
         try:
-            yield self.runInteraction(
-                "persist_event",
-                self._persist_event_txn,
-                event=event,
-                context=context,
-                backfilled=backfilled,
-                stream_ordering=stream_ordering,
-                is_new_state=is_new_state,
-                current_state=current_state,
-            )
+            with stream_ordering_manager as stream_ordering:
+                yield self.runInteraction(
+                    "persist_event",
+                    self._persist_event_txn,
+                    event=event,
+                    context=context,
+                    backfilled=backfilled,
+                    stream_ordering=stream_ordering,
+                    is_new_state=is_new_state,
+                    current_state=current_state,
+                )
         except _RollbackButIsFineException:
             pass
 
@@ -95,15 +104,6 @@ class EventsStore(SQLBaseStore):
         # Remove any existing cache entries for the event_id
         txn.call_after(self._invalidate_get_event_cache, event.event_id)
 
-        if stream_ordering is None:
-            with self._stream_id_gen.get_next_txn(txn) as stream_ordering:
-                return self._persist_event_txn(
-                    txn, event, context, backfilled,
-                    stream_ordering=stream_ordering,
-                    is_new_state=is_new_state,
-                    current_state=current_state,
-                )
-
         # We purposefully do this first since if we include a `current_state`
         # key, we *want* to update the `current_state_events` table
         if current_state:
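The ``persist_event`` change above unifies two cases behind one ``with`` block: either the caller supplied a ``stream_ordering``, or one is allocated from ``_stream_id_gen``. A small self-contained sketch of the pattern (illustrative only; note that a function decorated with ``@contextmanager`` is a factory and has to be called to obtain the actual context manager)::

    from contextlib import contextmanager

    def ordering_manager(fixed=None, allocate=None):
        if fixed is not None:
            @contextmanager
            def manager():
                yield fixed
            return manager()        # call the factory to get a context manager
        return allocate()           # assumed to already return a context manager

    with ordering_manager(fixed=42) as ordering:
        assert ordering == 42       # the body sees the caller-supplied value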
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index ee7718d5ed..34805e276e 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -19,7 +19,6 @@ from ._base import SQLBaseStore, Table
 from twisted.internet import defer
 
 import logging
-import copy
 import simplejson as json
 
 logger = logging.getLogger(__name__)
@@ -28,46 +27,45 @@ logger = logging.getLogger(__name__)
 class PushRuleStore(SQLBaseStore):
     @defer.inlineCallbacks
     def get_push_rules_for_user(self, user_name):
-        sql = (
-            "SELECT "+",".join(PushRuleTable.fields)+" "
-            "FROM "+PushRuleTable.table_name+" "
-            "WHERE user_name = ? "
-            "ORDER BY priority_class DESC, priority DESC"
+        rows = yield self._simple_select_list(
+            table=PushRuleTable.table_name,
+            keyvalues={
+                "user_name": user_name,
+            },
+            retcols=PushRuleTable.fields,
         )
-        rows = yield self._execute("get_push_rules_for_user", None, sql, user_name)
 
-        dicts = []
-        for r in rows:
-            d = {}
-            for i, f in enumerate(PushRuleTable.fields):
-                d[f] = r[i]
-            dicts.append(d)
+        rows.sort(
+            key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))
+        )
 
-        defer.returnValue(dicts)
+        defer.returnValue(rows)
 
     @defer.inlineCallbacks
     def get_push_rules_enabled_for_user(self, user_name):
         results = yield self._simple_select_list(
-            PushRuleEnableTable.table_name,
-            {'user_name': user_name},
-            PushRuleEnableTable.fields,
+            table=PushRuleEnableTable.table_name,
+            keyvalues={
+                'user_name': user_name
+            },
+            retcols=PushRuleEnableTable.fields,
             desc="get_push_rules_enabled_for_user",
         )
-        defer.returnValue(
-            {r['rule_id']: False if r['enabled'] == 0 else True for r in results}
-        )
+        defer.returnValue({
+            r['rule_id']: False if r['enabled'] == 0 else True for r in results
+        })
 
     @defer.inlineCallbacks
     def add_push_rule(self, before, after, **kwargs):
-        vals = copy.copy(kwargs)
+        vals = kwargs
         if 'conditions' in vals:
             vals['conditions'] = json.dumps(vals['conditions'])
         if 'actions' in vals:
             vals['actions'] = json.dumps(vals['actions'])
+
         # we could check the rest of the keys are valid column names
         # but sqlite will do that anyway so I think it's just pointless.
-        if 'id' in vals:
-            del vals['id']
+        vals.pop("id", None)
 
         if before or after:
             ret = yield self.runInteraction(
@@ -87,39 +85,39 @@ class PushRuleStore(SQLBaseStore):
             defer.returnValue(ret)
 
     def _add_push_rule_relative_txn(self, txn, user_name, **kwargs):
-        after = None
-        relative_to_rule = None
-        if 'after' in kwargs and kwargs['after']:
-            after = kwargs['after']
-            relative_to_rule = after
-        if 'before' in kwargs and kwargs['before']:
-            relative_to_rule = kwargs['before']
-
-        # get the priority of the rule we're inserting after/before
-        sql = (
-            "SELECT priority_class, priority FROM ? "
-            "WHERE user_name = ? and rule_id = ?" % (PushRuleTable.table_name,)
+        after = kwargs.pop("after", None)
+        relative_to_rule = kwargs.pop("before", after)
+
+        res = self._simple_select_one_txn(
+            txn,
+            table=PushRuleTable.table_name,
+            keyvalues={
+                "user_name": user_name,
+                "rule_id": relative_to_rule,
+            },
+            retcols=["priority_class", "priority"],
+            allow_none=True,
         )
-        txn.execute(sql, (user_name, relative_to_rule))
-        res = txn.fetchall()
+
         if not res:
             raise RuleNotFoundException(
                 "before/after rule not found: %s" % (relative_to_rule,)
             )
-        priority_class, base_rule_priority = res[0]
+
+        priority_class = res["priority_class"]
+        base_rule_priority = res["priority"]
 
         if 'priority_class' in kwargs and kwargs['priority_class'] != priority_class:
             raise InconsistentRuleException(
                 "Given priority class does not match class of relative rule"
             )
 
-        new_rule = copy.copy(kwargs)
-        if 'before' in new_rule:
-            del new_rule['before']
-        if 'after' in new_rule:
-            del new_rule['after']
+        new_rule = kwargs
+        new_rule.pop("before", None)
+        new_rule.pop("after", None)
         new_rule['priority_class'] = priority_class
         new_rule['user_name'] = user_name
+        new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn)
 
         # check if the priority before/after is free
         new_rule_priority = base_rule_priority
@@ -153,12 +151,11 @@ class PushRuleStore(SQLBaseStore):
 
             txn.execute(sql, (user_name, priority_class, new_rule_priority))
 
-        # now insert the new rule
-        sql = "INSERT INTO "+PushRuleTable.table_name+" ("
-        sql += ",".join(new_rule.keys())+") VALUES ("
-        sql += ", ".join(["?" for _ in new_rule.keys()])+")"
-
-        txn.execute(sql, new_rule.values())
+        self._simple_insert_txn(
+            txn,
+            table=PushRuleTable.table_name,
+            values=new_rule,
+        )
 
     def _add_push_rule_highest_priority_txn(self, txn, user_name,
                                             priority_class, **kwargs):
@@ -176,18 +173,17 @@ class PushRuleStore(SQLBaseStore):
             new_prio = highest_prio + 1
 
         # and insert the new rule
-        new_rule = copy.copy(kwargs)
-        if 'id' in new_rule:
-            del new_rule['id']
+        new_rule = kwargs
+        new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn)
         new_rule['user_name'] = user_name
         new_rule['priority_class'] = priority_class
         new_rule['priority'] = new_prio
 
-        sql = "INSERT INTO "+PushRuleTable.table_name+" ("
-        sql += ",".join(new_rule.keys())+") VALUES ("
-        sql += ", ".join(["?" for _ in new_rule.keys()])+")"
-
-        txn.execute(sql, new_rule.values())
+        self._simple_insert_txn(
+            txn,
+            table=PushRuleTable.table_name,
+            values=new_rule,
+        )
 
     @defer.inlineCallbacks
     def delete_push_rule(self, user_name, rule_id):
@@ -211,7 +207,7 @@ class PushRuleStore(SQLBaseStore):
         yield self._simple_upsert(
             PushRuleEnableTable.table_name,
             {'user_name': user_name, 'rule_id': rule_id},
-            {'enabled': enabled},
+            {'enabled': 1 if enabled else 0},
             desc="set_push_rule_enabled",
         )
 
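``get_push_rules_for_user`` now fetches rows with the generic ``_simple_select_list`` helper and reproduces the old ``ORDER BY priority_class DESC, priority DESC`` in Python by negating both fields in the sort key. A standalone example of that sort::

    rows = [
        {"rule_id": "a", "priority_class": 5, "priority": 1},
        {"rule_id": "b", "priority_class": 5, "priority": 3},
        {"rule_id": "c", "priority_class": 2, "priority": 9},
    ]

    # Negating both fields gives descending order on each key.
    rows.sort(key=lambda row: (-int(row["priority_class"]), -int(row["priority"])))

    print([r["rule_id"] for r in rows])   # ['b', 'a', 'c']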
diff --git a/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql
new file mode 100644
index 0000000000..c0b0fdfb69
--- /dev/null
+++ b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql
@@ -0,0 +1,32 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+CREATE TABLE IF NOT EXISTS new_server_keys_json (
+    server_name TEXT NOT NULL, -- Server name.
+    key_id TEXT NOT NULL, -- Requested key id.
+    from_server TEXT NOT NULL, -- Which server the keys were fetched from.
+    ts_added_ms BIGINT NOT NULL, -- When the keys were fetched
+    ts_valid_until_ms BIGINT NOT NULL, -- When this version of the keys expires.
+    key_json bytea NOT NULL, -- JSON certificate for the remote server.
+    CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server)
+);
+
+INSERT INTO new_server_keys_json
+    SELECT server_name, key_id, from_server, ts_added_ms, ts_valid_until_ms, key_json FROM server_keys_json;
+
+DROP TABLE server_keys_json;
+
+ALTER TABLE new_server_keys_json RENAME TO server_keys_json;
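The delta recreates ``server_keys_json`` with ``BIGINT`` timestamp columns; the likely motivation (suggested by the file name, not stated in the SQL itself) is that millisecond epoch timestamps no longer fit in a signed 32-bit integer column::

    ts_added_ms = 1431432642000      # ~2015-05-12 12:10:42 UTC, in milliseconds
    int32_max = 2 ** 31 - 1          # 2147483647

    assert ts_added_ms > int32_max   # hence BIGINT for ts_added_ms / ts_valid_until_ms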
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index e40eb8a8c4..89d1643f10 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -78,14 +78,18 @@ class StreamIdGenerator(object):
         self._current_max = None
         self._unfinished_ids = deque()
 
-    def get_next_txn(self, txn):
+    @defer.inlineCallbacks
+    def get_next(self, store):
         """
         Usage:
-            with stream_id_gen.get_next_txn(txn) as stream_id:
+            with (yield stream_id_gen.get_next(store)) as stream_id:
                 # ... persist event ...
         """
         if not self._current_max:
-            self._get_or_compute_current_max(txn)
+            yield store.runInteraction(
+                "_compute_current_max",
+                self._get_or_compute_current_max,
+            )
 
         with self._lock:
             self._current_max += 1
@@ -101,7 +105,7 @@ class StreamIdGenerator(object):
                 with self._lock:
                     self._unfinished_ids.remove(next_id)
 
-        return manager()
+        defer.returnValue(manager())
 
     @defer.inlineCallbacks
     def get_max_token(self, store):
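With this change ``StreamIdGenerator`` no longer needs a live transaction up front: ``get_next(store)`` returns a Deferred that fires with the context manager, so callers first ``yield`` it and then enter the ``with`` block. A sketch of a caller (hypothetical function, mirroring the usage added in ``synapse/storage/events.py`` above)::

    from twisted.internet import defer

    @defer.inlineCallbacks
    def persist_something(store, stream_id_gen):
        manager = yield stream_id_gen.get_next(store)
        with manager as stream_id:
            # The id stays in _unfinished_ids until the with block exits.
            yield store.runInteraction(
                "persist_something", lambda txn, _id: None, stream_id
            )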