diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 594566eb38..d01d46338a 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -268,7 +268,7 @@ class DataStore(RoomMemberStore, RoomStore,
self._stream_order_on_start = self.get_room_max_stream_ordering()
self._min_stream_order_on_start = self.get_room_min_stream_ordering()
- super(DataStore, self).__init__(hs)
+ super(DataStore, self).__init__(db_conn, hs)
def take_presence_startup_info(self):
active_on_startup = self._presence_on_startup
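The mechanical change running through most of this patch: every store now
takes the raw database connection as well as the homeserver, i.e.
__init__(self, db_conn, hs) rather than __init__(self, hs), with each
subclass forwarding db_conn up the inheritance chain. A minimal sketch of
the pattern (the subclass and its startup read are illustrative, not real
Synapse classes):

    class SQLBaseStore(object):
        def __init__(self, db_conn, hs):
            self.hs = hs
            self._clock = hs.get_clock()

    class ExampleStore(SQLBaseStore):
        def __init__(self, db_conn, hs):
            # forward db_conn before doing our own setup, so classes higher
            # up the MRO can use it too
            super(ExampleStore, self).__init__(db_conn, hs)
            # having the connection at construction time allows synchronous
            # reads of startup state before the reactor starts (illustrative)
            cur = db_conn.cursor()
            try:
                cur.execute("SELECT 1")
                self._startup_marker = cur.fetchone()[0]
            finally:
                cur.close()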
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 6caf7b3356..e94917d9cd 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -162,7 +162,7 @@ class PerformanceCounters(object):
class SQLBaseStore(object):
_TXN_ID = 0
- def __init__(self, hs):
+ def __init__(self, db_conn, hs):
self.hs = hs
self._clock = hs.get_clock()
self._db_pool = hs.get_db_pool()
diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py
index ff14e54c11..c8a1eb016b 100644
--- a/synapse/storage/account_data.py
+++ b/synapse/storage/account_data.py
@@ -63,7 +63,7 @@ class AccountDataStore(SQLBaseStore):
"get_account_data_for_user", get_account_data_for_user_txn
)
- @cachedInlineCallbacks(num_args=2)
+ @cachedInlineCallbacks(num_args=2, max_entries=5000)
def get_global_account_data_by_type_for_user(self, data_type, user_id):
"""
Returns:
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index c63935cb07..d8c84b7141 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -48,8 +48,8 @@ def _make_exclusive_regex(services_cache):
class ApplicationServiceStore(SQLBaseStore):
- def __init__(self, hs):
- super(ApplicationServiceStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(ApplicationServiceStore, self).__init__(db_conn, hs)
self.hostname = hs.hostname
self.services_cache = load_appservices(
hs.hostname,
@@ -173,8 +173,8 @@ class ApplicationServiceStore(SQLBaseStore):
class ApplicationServiceTransactionStore(SQLBaseStore):
- def __init__(self, hs):
- super(ApplicationServiceTransactionStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(ApplicationServiceTransactionStore, self).__init__(db_conn, hs)
@defer.inlineCallbacks
def get_appservices_by_state(self, state):
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index a6e6f52a6a..6f235ac051 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -80,8 +80,8 @@ class BackgroundUpdateStore(SQLBaseStore):
BACKGROUND_UPDATE_INTERVAL_MS = 1000
BACKGROUND_UPDATE_DURATION_MS = 100
- def __init__(self, hs):
- super(BackgroundUpdateStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(BackgroundUpdateStore, self).__init__(db_conn, hs)
self._background_update_performance = {}
self._background_update_queue = []
self._background_update_handlers = {}
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index 3c95e90eca..a03d1d6104 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -32,14 +32,14 @@ LAST_SEEN_GRANULARITY = 120 * 1000
class ClientIpStore(background_updates.BackgroundUpdateStore):
- def __init__(self, hs):
+ def __init__(self, db_conn, hs):
self.client_ip_last_seen = Cache(
name="client_ip_last_seen",
keylen=4,
max_entries=50000 * CACHE_SIZE_FACTOR,
)
- super(ClientIpStore, self).__init__(hs)
+ super(ClientIpStore, self).__init__(db_conn, hs)
self.register_background_index_update(
"user_ips_device_index",
diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py
index 0b62b493d5..548e795daf 100644
--- a/synapse/storage/deviceinbox.py
+++ b/synapse/storage/deviceinbox.py
@@ -29,8 +29,8 @@ logger = logging.getLogger(__name__)
class DeviceInboxStore(BackgroundUpdateStore):
DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop"
- def __init__(self, hs):
- super(DeviceInboxStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(DeviceInboxStore, self).__init__(db_conn, hs)
self.register_background_index_update(
"device_inbox_stream_index",
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
index bb27fd1f70..bd2effdf34 100644
--- a/synapse/storage/devices.py
+++ b/synapse/storage/devices.py
@@ -26,8 +26,8 @@ logger = logging.getLogger(__name__)
class DeviceStore(SQLBaseStore):
- def __init__(self, hs):
- super(DeviceStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(DeviceStore, self).__init__(db_conn, hs)
# Map of (user_id, device_id) -> bool. If there is an entry that implies
# the device exists.
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index e8133de2fa..55a05c59d5 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -39,8 +39,8 @@ class EventFederationStore(SQLBaseStore):
EVENT_AUTH_STATE_ONLY = "event_auth_state_only"
- def __init__(self, hs):
- super(EventFederationStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(EventFederationStore, self).__init__(db_conn, hs)
self.register_background_update_handler(
self.EVENT_AUTH_STATE_ONLY,
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index d6d8723b4a..8efe2fd4bb 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -65,8 +65,8 @@ def _deserialize_action(actions, is_highlight):
class EventPushActionsStore(SQLBaseStore):
EPA_HIGHLIGHT_INDEX = "epa_highlight_index"
- def __init__(self, hs):
- super(EventPushActionsStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(EventPushActionsStore, self).__init__(db_conn, hs)
self.register_background_index_update(
self.EPA_HIGHLIGHT_INDEX,
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 4298d8baf1..d08f7571d7 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -197,8 +197,8 @@ class EventsStore(SQLBaseStore):
EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
- def __init__(self, hs):
- super(EventsStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(EventsStore, self).__init__(db_conn, hs)
self._clock = hs.get_clock()
self.register_background_update_handler(
self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts
diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py
index 9e63db5c6c..8fde1aab8e 100644
--- a/synapse/storage/group_server.py
+++ b/synapse/storage/group_server.py
@@ -35,7 +35,9 @@ class GroupServerStore(SQLBaseStore):
keyvalues={
"group_id": group_id,
},
- retcols=("name", "short_description", "long_description", "avatar_url",),
+ retcols=(
+ "name", "short_description", "long_description", "avatar_url", "is_public"
+ ),
allow_none=True,
desc="is_user_in_group",
)
@@ -52,7 +54,7 @@ class GroupServerStore(SQLBaseStore):
return self._simple_select_list(
table="group_users",
keyvalues=keyvalues,
- retcols=("user_id", "is_public",),
+ retcols=("user_id", "is_public", "is_admin",),
desc="get_users_in_group",
)
@@ -855,6 +857,19 @@ class GroupServerStore(SQLBaseStore):
desc="add_room_to_group",
)
+ def update_room_in_group_visibility(self, group_id, room_id, is_public):
+ return self._simple_update(
+ table="group_rooms",
+ keyvalues={
+ "group_id": group_id,
+ "room_id": room_id,
+ },
+ updatevalues={
+ "is_public": is_public,
+ },
+ desc="update_room_in_group_visibility",
+ )
+
def remove_room_from_group(self, group_id, room_id):
def _remove_room_from_group_txn(txn):
self._simple_delete_txn(
@@ -1026,6 +1041,7 @@ class GroupServerStore(SQLBaseStore):
"avatar_url": avatar_url,
"short_description": short_description,
"long_description": long_description,
+ "is_public": True,
},
desc="create_group",
)
@@ -1086,6 +1102,24 @@ class GroupServerStore(SQLBaseStore):
desc="update_remote_attestion",
)
+ def remove_attestation_renewal(self, group_id, user_id):
+ """Remove an attestation that we thought we should renew, but actually
+ shouldn't. Ideally this would never get called as we would never
+ incorrectly try to do attestations for local users on local groups.
+
+ Args:
+ group_id (str)
+ user_id (str)
+ """
+ return self._simple_delete(
+ table="group_attestations_renewals",
+ keyvalues={
+ "group_id": group_id,
+ "user_id": user_id,
+ },
+ desc="remove_attestation_renewal",
+ )
+
@defer.inlineCallbacks
def get_remote_attestation(self, group_id, user_id):
"""Get the attestation that proves the remote agrees that the user is
diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py
index 7110a71279..52e5cdad70 100644
--- a/synapse/storage/media_repository.py
+++ b/synapse/storage/media_repository.py
@@ -254,6 +254,9 @@ class MediaRepositoryStore(SQLBaseStore):
return self.runInteraction("get_expired_url_cache", _get_expired_url_cache_txn)
def delete_url_cache(self, media_ids):
+ if len(media_ids) == 0:
+ return
+
sql = (
"DELETE FROM local_media_repository_url_cache"
" WHERE media_id = ?"
@@ -281,6 +284,9 @@ class MediaRepositoryStore(SQLBaseStore):
)
def delete_url_cache_media(self, media_ids):
+ if len(media_ids) == 0:
+ return
+
def _delete_url_cache_media_txn(txn):
sql = (
"DELETE FROM local_media_repository"
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 817c2185c8..d1691bbac2 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
# Remember to update this number every time a change is made to database
# schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 45
+SCHEMA_VERSION = 46
dir_path = os.path.abspath(os.path.dirname(__file__))
@@ -44,6 +44,13 @@ def prepare_database(db_conn, database_engine, config):
If `config` is None then prepare_database will assert that no upgrade is
necessary, *or* will create a fresh database if the database is empty.
+
+ Args:
+ db_conn:
+ database_engine:
+ config (synapse.config.homeserver.HomeServerConfig|None):
+ application config, or None if we are connecting to an existing
+ database which we expect to be configured already
"""
try:
cur = db_conn.cursor()
@@ -64,6 +71,10 @@ def prepare_database(db_conn, database_engine, config):
else:
_setup_new_database(cur, database_engine)
+ # check if any of our configured dynamic modules want a database
+ if config is not None:
+ _apply_module_schemas(cur, database_engine, config)
+
cur.close()
db_conn.commit()
except Exception:
@@ -283,6 +294,65 @@ def _upgrade_existing_database(cur, current_version, applied_delta_files,
)
+def _apply_module_schemas(txn, database_engine, config):
+ """Apply the module schemas for the dynamic modules, if any
+
+ Args:
+ txn: database cursor
+ database_engine: synapse database engine class
+ config (synapse.config.homeserver.HomeServerConfig):
+ application config
+ """
+ for (mod, _config) in config.password_providers:
+ if not hasattr(mod, 'get_db_schema_files'):
+ continue
+ modname = ".".join((mod.__module__, mod.__name__))
+ _apply_module_schema_files(
+ txn, database_engine, modname, mod.get_db_schema_files(),
+ )
+
+
+def _apply_module_schema_files(cur, database_engine, modname, names_and_streams):
+ """Apply the module schemas for a single module
+
+ Args:
+ cur: database cursor
+ database_engine: synapse database engine class
+ modname (str): fully qualified name of the module
+ names_and_streams (Iterable[(str, file)]): the names and streams of
+ schemas to be applied
+ """
+ cur.execute(
+ database_engine.convert_param_style(
+ "SELECT file FROM applied_module_schemas WHERE module_name = ?"
+ ),
+ (modname,)
+ )
+ applied_deltas = set(d for d, in cur)
+ for (name, stream) in names_and_streams:
+ if name in applied_deltas:
+ continue
+
+ root_name, ext = os.path.splitext(name)
+ if ext != '.sql':
+ raise PrepareDatabaseException(
+ "only .sql files are currently supported for module schemas",
+ )
+
+ logger.info("applying schema %s for %s", name, modname)
+ for statement in get_statements(stream):
+ cur.execute(statement)
+
+ # Mark as done.
+ cur.execute(
+ database_engine.convert_param_style(
+ "INSERT INTO applied_module_schemas (module_name, file)"
+ " VALUES (?,?)",
+ ),
+ (modname, name)
+ )
+
+
def get_statements(f):
statement_buffer = ""
in_comment = False # If we're in a /* ... */ style comment
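With _apply_module_schemas in place, a dynamic module (currently only
password providers are scanned) can ship its own schema by exposing a
get_db_schema_files() method returning (name, stream) pairs; each .sql file
is applied once and recorded in applied_module_schemas. A hedged sketch of
such a module (the provider and table names are illustrative):

    from io import StringIO

    _EXAMPLE_SCHEMA = u"""
    CREATE TABLE IF NOT EXISTS example_provider_users(
        user_id TEXT NOT NULL,
        external_id TEXT NOT NULL,
        UNIQUE(user_id)
    );
    """

    class ExamplePasswordProvider(object):
        # ... the usual password provider hooks would live here ...

        @staticmethod
        def get_db_schema_files():
            # one (name, stream) pair per schema file; names must end in
            # .sql, and each file is applied at most once per module
            return [("example_provider.sql", StringIO(_EXAMPLE_SCHEMA))]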
diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py
index f42b8014c7..12b3cc7f5f 100644
--- a/synapse/storage/receipts.py
+++ b/synapse/storage/receipts.py
@@ -27,8 +27,8 @@ logger = logging.getLogger(__name__)
class ReceiptsStore(SQLBaseStore):
- def __init__(self, hs):
- super(ReceiptsStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(ReceiptsStore, self).__init__(db_conn, hs)
self._receipts_stream_cache = StreamChangeCache(
"ReceiptsRoomChangeCache", self._receipts_id_gen.get_current_token()
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 20acd58fcf..8b9544c209 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -24,8 +24,8 @@ from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
class RegistrationStore(background_updates.BackgroundUpdateStore):
- def __init__(self, hs):
- super(RegistrationStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(RegistrationStore, self).__init__(db_conn, hs)
self.clock = hs.get_clock()
@@ -36,12 +36,15 @@ class RegistrationStore(background_updates.BackgroundUpdateStore):
columns=["user_id", "device_id"],
)
- self.register_background_index_update(
- "refresh_tokens_device_index",
- index_name="refresh_tokens_device_id",
- table="refresh_tokens",
- columns=["user_id", "device_id"],
- )
+ # we no longer use refresh tokens, but it's possible that some people
+ # might have a background update queued to build this index. Just
+ # clear the background update.
+ @defer.inlineCallbacks
+ def noop_update(progress, batch_size):
+ yield self._end_background_update("refresh_tokens_device_index")
+ defer.returnValue(1)
+ self.register_background_update_handler(
+ "refresh_tokens_device_index", noop_update)
@defer.inlineCallbacks
def add_access_token_to_user(self, user_id, token, device_id=None):
@@ -177,9 +180,11 @@ class RegistrationStore(background_updates.BackgroundUpdateStore):
)
if create_profile_with_localpart:
+ # set a default displayname serverside to avoid ugly race
+ # between auto-joins and clients trying to set displaynames
txn.execute(
- "INSERT INTO profiles(user_id) VALUES (?)",
- (create_profile_with_localpart,)
+ "INSERT INTO profiles(user_id, displayname) VALUES (?,?)",
+ (create_profile_with_localpart, create_profile_with_localpart)
)
self._invalidate_cache_and_stream(
@@ -236,12 +241,10 @@ class RegistrationStore(background_updates.BackgroundUpdateStore):
"user_set_password_hash", user_set_password_hash_txn
)
- @defer.inlineCallbacks
def user_delete_access_tokens(self, user_id, except_token_id=None,
- device_id=None,
- delete_refresh_tokens=False):
+ device_id=None):
"""
- Invalidate access/refresh tokens belonging to a user
+ Invalidate access tokens belonging to a user
Args:
user_id (str): ID of user the tokens belong to
@@ -250,10 +253,9 @@ class RegistrationStore(background_updates.BackgroundUpdateStore):
device_id (str|None): ID of device the tokens are associated with.
If None, tokens associated with any device (or no device) will
be deleted
- delete_refresh_tokens (bool): True to delete refresh tokens as
- well as access tokens.
Returns:
- defer.Deferred:
+ defer.Deferred[list[(str, str|None)]]: a list of (token, device_id)
+ pairs for the deleted tokens
"""
def f(txn):
keyvalues = {
@@ -262,13 +264,6 @@ class RegistrationStore(background_updates.BackgroundUpdateStore):
if device_id is not None:
keyvalues["device_id"] = device_id
- if delete_refresh_tokens:
- self._simple_delete_txn(
- txn,
- table="refresh_tokens",
- keyvalues=keyvalues,
- )
-
items = keyvalues.items()
where_clause = " AND ".join(k + " = ?" for k, _ in items)
values = [v for _, v in items]
@@ -277,14 +272,14 @@ class RegistrationStore(background_updates.BackgroundUpdateStore):
values.append(except_token_id)
txn.execute(
- "SELECT token FROM access_tokens WHERE %s" % where_clause,
+ "SELECT token, device_id FROM access_tokens WHERE %s" % where_clause,
values
)
- rows = self.cursor_to_dict(txn)
+ tokens_and_devices = [(r[0], r[1]) for r in txn]
- for row in rows:
+ for token, _ in tokens_and_devices:
self._invalidate_cache_and_stream(
- txn, self.get_user_by_access_token, (row["token"],)
+ txn, self.get_user_by_access_token, (token,)
)
txn.execute(
@@ -292,7 +287,9 @@ class RegistrationStore(background_updates.BackgroundUpdateStore):
values
)
- yield self.runInteraction(
+ return tokens_and_devices
+
+ return self.runInteraction(
"user_delete_access_tokens", f,
)
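Note that user_delete_access_tokens now resolves to the deleted
(token, device_id) pairs rather than just invalidating them, so callers can
do per-device cleanup afterwards. A hedged sketch of such a caller (the
delete_device callback is hypothetical):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def deactivate_user_tokens(store, delete_device, user_id,
                               except_token_id=None):
        tokens_and_devices = yield store.user_delete_access_tokens(
            user_id, except_token_id=except_token_id,
        )
        for token, device_id in tokens_and_devices:
            if device_id is not None:
                # hypothetical per-device cleanup hook
                yield delete_device(user_id, device_id)
        defer.returnValue(len(tokens_and_devices))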
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 3fa8019eb7..3e77fd3901 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -49,8 +49,8 @@ _MEMBERSHIP_PROFILE_UPDATE_NAME = "room_membership_profile_update"
class RoomMemberStore(SQLBaseStore):
- def __init__(self, hs):
- super(RoomMemberStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(RoomMemberStore, self).__init__(db_conn, hs)
self.register_background_update_handler(
_MEMBERSHIP_PROFILE_UPDATE_NAME, self._background_add_membership_profile
)
diff --git a/synapse/storage/schema/delta/33/refreshtoken_device_index.sql b/synapse/storage/schema/delta/33/refreshtoken_device_index.sql
deleted file mode 100644
index bb225dafbf..0000000000
--- a/synapse/storage/schema/delta/33/refreshtoken_device_index.sql
+++ /dev/null
@@ -1,17 +0,0 @@
-/* Copyright 2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-INSERT INTO background_updates (update_name, progress_json) VALUES
- ('refresh_tokens_device_index', '{}');
diff --git a/synapse/storage/schema/delta/43/user_share.sql b/synapse/storage/schema/delta/43/user_share.sql
index 4501d90cbb..ee7062abe4 100644
--- a/synapse/storage/schema/delta/43/user_share.sql
+++ b/synapse/storage/schema/delta/43/user_share.sql
@@ -29,5 +29,5 @@ CREATE INDEX users_who_share_rooms_r_idx ON users_who_share_rooms(room_id);
CREATE INDEX users_who_share_rooms_o_idx ON users_who_share_rooms(other_user_id);
--- Make sure that we popualte the table initially
+-- Make sure that we populate the table initially
UPDATE user_directory_stream_pos SET stream_id = NULL;
diff --git a/synapse/storage/schema/delta/33/refreshtoken_device.sql b/synapse/storage/schema/delta/46/drop_refresh_tokens.sql
index 290bd6da86..68c48a89a9 100644
--- a/synapse/storage/schema/delta/33/refreshtoken_device.sql
+++ b/synapse/storage/schema/delta/46/drop_refresh_tokens.sql
@@ -1,4 +1,4 @@
-/* Copyright 2016 OpenMarket Ltd
+/* Copyright 2017 New Vector Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,4 +13,5 @@
* limitations under the License.
*/
-ALTER TABLE refresh_tokens ADD COLUMN device_id TEXT;
+/* we no longer use (or create) the refresh_tokens table */
+DROP TABLE IF EXISTS refresh_tokens;
diff --git a/synapse/storage/schema/delta/46/group_server.sql b/synapse/storage/schema/delta/46/group_server.sql
new file mode 100644
index 0000000000..097679bc9a
--- /dev/null
+++ b/synapse/storage/schema/delta/46/group_server.sql
@@ -0,0 +1,32 @@
+/* Copyright 2017 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE groups_new (
+ group_id TEXT NOT NULL,
+ name TEXT, -- the display name of the group
+ avatar_url TEXT,
+ short_description TEXT,
+ long_description TEXT,
+ is_public BOOL NOT NULL -- whether non-members can access group APIs
+);
+
+-- NB: awful hack to make the inserted value be TRUE on postgres and 1 on sqlite
+INSERT INTO groups_new
+ SELECT group_id, name, avatar_url, short_description, long_description, (1=1) FROM groups;
+
+DROP TABLE groups;
+ALTER TABLE groups_new RENAME TO groups;
+
+CREATE UNIQUE INDEX groups_idx ON groups(group_id);
diff --git a/synapse/storage/schema/delta/23/refresh_tokens.sql b/synapse/storage/schema/delta/46/user_dir_typos.sql
index 34db0cf12b..d9505f8da1 100644
--- a/synapse/storage/schema/delta/23/refresh_tokens.sql
+++ b/synapse/storage/schema/delta/46/user_dir_typos.sql
@@ -1,4 +1,4 @@
-/* Copyright 2015, 2016 OpenMarket Ltd
+/* Copyright 2017 New Vector Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,9 +13,12 @@
* limitations under the License.
*/
-CREATE TABLE IF NOT EXISTS refresh_tokens(
- id INTEGER PRIMARY KEY,
- token TEXT NOT NULL,
- user_id TEXT NOT NULL,
- UNIQUE (token)
-);
+-- this is just embarrassing :|
+ALTER TABLE users_in_pubic_room RENAME TO users_in_public_rooms;
+
+-- this is only 300K rows on matrix.org and takes ~3s to generate the index,
+-- so is hopefully not going to block anyone else for that long...
+CREATE INDEX users_in_public_rooms_room_idx ON users_in_public_rooms(room_id);
+CREATE UNIQUE INDEX users_in_public_rooms_user_idx ON users_in_public_rooms(user_id);
+DROP INDEX users_in_pubic_room_room_idx;
+DROP INDEX users_in_pubic_room_user_idx;
diff --git a/synapse/storage/schema/schema_version.sql b/synapse/storage/schema/schema_version.sql
index a7ade69986..42e5cb6df5 100644
--- a/synapse/storage/schema/schema_version.sql
+++ b/synapse/storage/schema/schema_version.sql
@@ -25,3 +25,10 @@ CREATE TABLE IF NOT EXISTS applied_schema_deltas(
file TEXT NOT NULL,
UNIQUE(version, file)
);
+
+-- a list of schema files we have loaded on behalf of dynamic modules
+CREATE TABLE IF NOT EXISTS applied_module_schemas(
+ module_name TEXT NOT NULL,
+ file TEXT NOT NULL,
+ UNIQUE(module_name, file)
+);
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index 05d4ef586e..479b04c636 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -33,8 +33,8 @@ class SearchStore(BackgroundUpdateStore):
EVENT_SEARCH_ORDER_UPDATE_NAME = "event_search_order"
EVENT_SEARCH_USE_GIST_POSTGRES_NAME = "event_search_postgres_gist"
- def __init__(self, hs):
- super(SearchStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(SearchStore, self).__init__(db_conn, hs)
self.register_background_update_handler(
self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search
)
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 5673e4aa96..dd01b68762 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -63,8 +63,8 @@ class StateStore(SQLBaseStore):
STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
- def __init__(self, hs):
- super(StateStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(StateStore, self).__init__(db_conn, hs)
self.register_background_update_handler(
self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME,
self._background_deduplicate_state,
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index 809fdd311f..8f61f7ffae 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -46,8 +46,8 @@ class TransactionStore(SQLBaseStore):
"""A collection of queries for handling PDUs.
"""
- def __init__(self, hs):
- super(TransactionStore, self).__init__(hs)
+ def __init__(self, db_conn, hs):
+ super(TransactionStore, self).__init__(db_conn, hs)
self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000)
diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py
index 2a4db3f03c..5dc5b9582a 100644
--- a/synapse/storage/user_directory.py
+++ b/synapse/storage/user_directory.py
@@ -63,7 +63,7 @@ class UserDirectoryStore(SQLBaseStore):
user_ids (list(str)): Users to add
"""
yield self._simple_insert_many(
- table="users_in_pubic_room",
+ table="users_in_public_rooms",
values=[
{
"user_id": user_id,
@@ -219,7 +219,7 @@ class UserDirectoryStore(SQLBaseStore):
@defer.inlineCallbacks
def update_user_in_public_user_list(self, user_id, room_id):
yield self._simple_update_one(
- table="users_in_pubic_room",
+ table="users_in_public_rooms",
keyvalues={"user_id": user_id},
updatevalues={"room_id": room_id},
desc="update_user_in_public_user_list",
@@ -240,7 +240,7 @@ class UserDirectoryStore(SQLBaseStore):
)
self._simple_delete_txn(
txn,
- table="users_in_pubic_room",
+ table="users_in_public_rooms",
keyvalues={"user_id": user_id},
)
txn.call_after(
@@ -256,7 +256,7 @@ class UserDirectoryStore(SQLBaseStore):
@defer.inlineCallbacks
def remove_from_user_in_public_room(self, user_id):
yield self._simple_delete(
- table="users_in_pubic_room",
+ table="users_in_public_rooms",
keyvalues={"user_id": user_id},
desc="remove_from_user_in_public_room",
)
@@ -267,7 +267,7 @@ class UserDirectoryStore(SQLBaseStore):
in the given room_id
"""
return self._simple_select_onecol(
- table="users_in_pubic_room",
+ table="users_in_public_rooms",
keyvalues={"room_id": room_id},
retcol="user_id",
desc="get_users_in_public_due_to_room",
@@ -286,7 +286,7 @@ class UserDirectoryStore(SQLBaseStore):
)
user_ids_pub = yield self._simple_select_onecol(
- table="users_in_pubic_room",
+ table="users_in_public_rooms",
keyvalues={"room_id": room_id},
retcol="user_id",
desc="get_users_in_dir_due_to_room",
@@ -514,7 +514,7 @@ class UserDirectoryStore(SQLBaseStore):
def _delete_all_from_user_dir_txn(txn):
txn.execute("DELETE FROM user_directory")
txn.execute("DELETE FROM user_directory_search")
- txn.execute("DELETE FROM users_in_pubic_room")
+ txn.execute("DELETE FROM users_in_public_rooms")
txn.execute("DELETE FROM users_who_share_rooms")
txn.call_after(self.get_user_in_directory.invalidate_all)
txn.call_after(self.get_user_in_public_room.invalidate_all)
@@ -537,7 +537,7 @@ class UserDirectoryStore(SQLBaseStore):
@cached()
def get_user_in_public_room(self, user_id):
return self._simple_select_one(
- table="users_in_pubic_room",
+ table="users_in_public_rooms",
keyvalues={"user_id": user_id},
retcols=("room_id",),
allow_none=True,
@@ -641,7 +641,7 @@ class UserDirectoryStore(SQLBaseStore):
SELECT d.user_id, display_name, avatar_url
FROM user_directory_search
INNER JOIN user_directory AS d USING (user_id)
- LEFT JOIN users_in_pubic_room AS p USING (user_id)
+ LEFT JOIN users_in_public_rooms AS p USING (user_id)
LEFT JOIN (
SELECT other_user_id AS user_id FROM users_who_share_rooms
WHERE user_id = ? AND share_private
@@ -680,7 +680,7 @@ class UserDirectoryStore(SQLBaseStore):
SELECT d.user_id, display_name, avatar_url
FROM user_directory_search
INNER JOIN user_directory AS d USING (user_id)
- LEFT JOIN users_in_pubic_room AS p USING (user_id)
+ LEFT JOIN users_in_public_rooms AS p USING (user_id)
LEFT JOIN (
SELECT other_user_id AS user_id FROM users_who_share_rooms
WHERE user_id = ? AND share_private
|