From f3f0ab10fe766c766dedf9d80e4ef198e3e45c09 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 2 Mar 2022 13:00:16 +0000 Subject: Move scripts directory inside synapse, exposing as setuptools entry_points (#12118) * Two scripts are basically entry_points already * Move and rename scripts/* to synapse/_scripts/*.py * Delete sync_room_to_group.pl * Expose entry points in setup.py * Update linter script and config * Fixup scripts & docs mentioning scripts that moved Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- .ci/scripts/test_export_data_command.sh | 4 +- .ci/scripts/test_synapse_port_db.sh | 12 +- .dockerignore | 1 - MANIFEST.in | 1 - changelog.d/12118.misc | 1 + docker/Dockerfile | 1 - docs/development/database_schema.md | 6 +- docs/usage/administration/admin_api/README.md | 2 +- mypy.ini | 4 + scripts-dev/generate_sample_config | 10 +- scripts-dev/lint.sh | 7 - scripts-dev/make_full_schema.sh | 6 +- scripts/export_signing_key | 100 -- scripts/generate_config | 78 -- scripts/generate_log_config | 44 - scripts/generate_signing_key.py | 36 - scripts/hash_password | 79 -- scripts/move_remote_media_to_new_store.py | 118 -- scripts/register_new_matrix_user | 19 - scripts/synapse_port_db | 1253 ------------------- scripts/synapse_review_recent_signups | 19 - scripts/sync_room_to_group.pl | 45 - scripts/update_synapse_database | 117 -- setup.py | 14 +- snap/snapcraft.yaml | 2 +- synapse/_scripts/export_signing_key.py | 103 ++ synapse/_scripts/generate_config.py | 83 ++ synapse/_scripts/generate_log_config.py | 49 + synapse/_scripts/generate_signing_key.py | 41 + synapse/_scripts/hash_password.py | 83 ++ synapse/_scripts/move_remote_media_to_new_store.py | 118 ++ synapse/_scripts/synapse_port_db.py | 1257 ++++++++++++++++++++ synapse/_scripts/update_synapse_database.py | 117 ++ synapse/config/_base.py | 2 +- tox.ini | 8 - 35 files changed, 1891 insertions(+), 1949 deletions(-) create mode 100644 changelog.d/12118.misc delete mode 100755 scripts/export_signing_key delete mode 100755 scripts/generate_config delete mode 100755 scripts/generate_log_config delete mode 100755 scripts/generate_signing_key.py delete mode 100755 scripts/hash_password delete mode 100755 scripts/move_remote_media_to_new_store.py delete mode 100755 scripts/register_new_matrix_user delete mode 100755 scripts/synapse_port_db delete mode 100755 scripts/synapse_review_recent_signups delete mode 100755 scripts/sync_room_to_group.pl delete mode 100755 scripts/update_synapse_database create mode 100755 synapse/_scripts/export_signing_key.py create mode 100755 synapse/_scripts/generate_config.py create mode 100755 synapse/_scripts/generate_log_config.py create mode 100755 synapse/_scripts/generate_signing_key.py create mode 100755 synapse/_scripts/hash_password.py create mode 100755 synapse/_scripts/move_remote_media_to_new_store.py create mode 100755 synapse/_scripts/synapse_port_db.py create mode 100755 synapse/_scripts/update_synapse_database.py diff --git a/.ci/scripts/test_export_data_command.sh b/.ci/scripts/test_export_data_command.sh index ab96387a0a..224cae9216 100755 --- a/.ci/scripts/test_export_data_command.sh +++ b/.ci/scripts/test_export_data_command.sh @@ -21,7 +21,7 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml echo "--- Prepare test database" # Make sure the SQLite3 database is using the latest schema and has no pending background update. 
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates +update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # Run the export-data command on the sqlite test database python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \ @@ -41,7 +41,7 @@ fi # Port the SQLite databse to postgres so we can check command works against postgres echo "+++ Port SQLite3 databse to postgres" -scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml +synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml # Run the export-data command on postgres database python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \ diff --git a/.ci/scripts/test_synapse_port_db.sh b/.ci/scripts/test_synapse_port_db.sh index 797904e64c..91bd966f32 100755 --- a/.ci/scripts/test_synapse_port_db.sh +++ b/.ci/scripts/test_synapse_port_db.sh @@ -25,17 +25,19 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml echo "--- Prepare test database" # Make sure the SQLite3 database is using the latest schema and has no pending background update. -scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates +update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # Create the PostgreSQL database. .ci/scripts/postgres_exec.py "CREATE DATABASE synapse" echo "+++ Run synapse_port_db against test database" -coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml +# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`, +# but coverage seems unable to find the entrypoints installed by `pip install -e .`. +synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml # We should be able to run twice against the same database. echo "+++ Run synapse_port_db a second time" -coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml +synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml ##### @@ -46,7 +48,7 @@ echo "--- Prepare empty SQLite database" # we do this by deleting the sqlite db, and then doing the same again. rm .ci/test_db.db -scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates +update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # re-create the PostgreSQL database. 
.ci/scripts/postgres_exec.py \ @@ -54,4 +56,4 @@ scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-b "CREATE DATABASE synapse" echo "+++ Run synapse_port_db against empty database" -coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml +synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml diff --git a/.dockerignore b/.dockerignore index f6c638b0a2..617f701597 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,7 +3,6 @@ # things to include !docker -!scripts !synapse !MANIFEST.in !README.rst diff --git a/MANIFEST.in b/MANIFEST.in index 76d14eb642..7e903518e1 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -17,7 +17,6 @@ recursive-include synapse/storage *.txt recursive-include synapse/storage *.md recursive-include docs * -recursive-include scripts * recursive-include scripts-dev * recursive-include synapse *.pyi recursive-include tests *.py diff --git a/changelog.d/12118.misc b/changelog.d/12118.misc new file mode 100644 index 0000000000..a2c397d907 --- /dev/null +++ b/changelog.d/12118.misc @@ -0,0 +1 @@ +Move scripts to Synapse package and expose as setuptools entry points. diff --git a/docker/Dockerfile b/docker/Dockerfile index a8bb9b0e7f..327275a9ca 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -46,7 +46,6 @@ RUN \ && rm -rf /var/lib/apt/lists/* # Copy just what we need to pip install -COPY scripts /synapse/scripts/ COPY MANIFEST.in README.rst setup.py synctl /synapse/ COPY synapse/__init__.py /synapse/synapse/__init__.py COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md index a767d3af9f..d996a7caa2 100644 --- a/docs/development/database_schema.md +++ b/docs/development/database_schema.md @@ -158,9 +158,9 @@ same as integers. There are three separate aspects to this: * Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in - `scripts/synapse_port_db`. This tells the port script to cast the integer - value from SQLite to a boolean before writing the value to the postgres - database. + `synapse/_scripts/synapse_port_db.py`. This tells the port script to cast + the integer value from SQLite to a boolean before writing the value to the + postgres database. * Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not diff --git a/docs/usage/administration/admin_api/README.md b/docs/usage/administration/admin_api/README.md index 2fca96f8be..3cbedc5dfa 100644 --- a/docs/usage/administration/admin_api/README.md +++ b/docs/usage/administration/admin_api/README.md @@ -12,7 +12,7 @@ UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'; ``` A new server admin user can also be created using the `register_new_matrix_user` -command. This is a script that is located in the `scripts/` directory, or possibly +command. This is a script that is distributed as part of synapse. It is possibly already on your `$PATH` depending on how Synapse was installed. Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings. 
diff --git a/mypy.ini b/mypy.ini index 38ff787609..6b1e995e64 100644 --- a/mypy.ini +++ b/mypy.ini @@ -23,6 +23,10 @@ files = # https://docs.python.org/3/library/re.html#re.X exclude = (?x) ^( + |synapse/_scripts/export_signing_key.py + |synapse/_scripts/move_remote_media_to_new_store.py + |synapse/_scripts/synapse_port_db.py + |synapse/_scripts/update_synapse_database.py |synapse/storage/databases/__init__.py |synapse/storage/databases/main/__init__.py |synapse/storage/databases/main/cache.py diff --git a/scripts-dev/generate_sample_config b/scripts-dev/generate_sample_config index 4cd1d1d5b8..185e277933 100755 --- a/scripts-dev/generate_sample_config +++ b/scripts-dev/generate_sample_config @@ -10,19 +10,19 @@ SAMPLE_CONFIG="docs/sample_config.yaml" SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml" check() { - diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || return 1 + diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || return 1 } if [ "$1" == "--check" ]; then - diff -u "$SAMPLE_CONFIG" <(./scripts/generate_config --header-file docs/.sample_config_header.yaml) >/dev/null || { + diff -u "$SAMPLE_CONFIG" <(synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml) >/dev/null || { echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2 exit 1 } - diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || { + diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || { echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2 exit 1 } else - ./scripts/generate_config --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG" - ./scripts/generate_log_config -o "$SAMPLE_LOG_CONFIG" + synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG" + synapse/_scripts/generate_log_config.py -o "$SAMPLE_LOG_CONFIG" fi diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index b6554a73c1..df4d4934d0 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -84,13 +84,6 @@ else files=( "synapse" "docker" "tests" # annoyingly, black doesn't find these so we have to list them - "scripts/export_signing_key" - "scripts/generate_config" - "scripts/generate_log_config" - "scripts/hash_password" - "scripts/register_new_matrix_user" - "scripts/synapse_port_db" - "scripts/update_synapse_database" "scripts-dev" "scripts-dev/build_debian_packages" "scripts-dev/sign_json" diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh index c3c90f4ec6..f0e22d4ca2 100755 --- a/scripts-dev/make_full_schema.sh +++ b/scripts-dev/make_full_schema.sh @@ -147,7 +147,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG" # Make sure the SQLite3 database is using the latest schema and has no pending background update. echo "Running db background jobs..." -scripts/update_synapse_database --database-config --run-background-updates "$SQLITE_CONFIG" +synapse/_scripts/update_synapse_database.py --database-config --run-background-updates "$SQLITE_CONFIG" # Create the PostgreSQL database. echo "Creating postgres database..." @@ -156,10 +156,10 @@ createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME" echo "Copying data from SQLite3 to Postgres with synapse_port_db..." 
if [ -z "$COVERAGE" ]; then # No coverage needed - scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" + synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" else # Coverage desired - coverage run scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" + coverage run synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" fi # Delete schema_version, applied_schema_deltas and applied_module_schemas tables diff --git a/scripts/export_signing_key b/scripts/export_signing_key deleted file mode 100755 index bf0139bd64..0000000000 --- a/scripts/export_signing_key +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import argparse -import sys -import time -from typing import Optional - -import nacl.signing -from signedjson.key import encode_verify_key_base64, get_verify_key, read_signing_keys - - -def exit(status: int = 0, message: Optional[str] = None): - if message: - print(message, file=sys.stderr) - sys.exit(status) - - -def format_plain(public_key: nacl.signing.VerifyKey): - print( - "%s:%s %s" - % ( - public_key.alg, - public_key.version, - encode_verify_key_base64(public_key), - ) - ) - - -def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int): - print( - ' "%s:%s": { key: "%s", expired_ts: %i }' - % ( - public_key.alg, - public_key.version, - encode_verify_key_base64(public_key), - expiry_ts, - ) - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "key_file", - nargs="+", - type=argparse.FileType("r"), - help="The key file to read", - ) - - parser.add_argument( - "-x", - action="store_true", - dest="for_config", - help="format the output for inclusion in the old_signing_keys config setting", - ) - - parser.add_argument( - "--expiry-ts", - type=int, - default=int(time.time() * 1000) + 6 * 3600000, - help=( - "The expiry time to use for -x, in milliseconds since 1970. The default " - "is (now+6h)." 
- ), - ) - - args = parser.parse_args() - - formatter = ( - (lambda k: format_for_config(k, args.expiry_ts)) - if args.for_config - else format_plain - ) - - keys = [] - for file in args.key_file: - try: - res = read_signing_keys(file) - except Exception as e: - exit( - status=1, - message="Error reading key from file %s: %s %s" - % (file.name, type(e), e), - ) - res = [] - for key in res: - formatter(get_verify_key(key)) diff --git a/scripts/generate_config b/scripts/generate_config deleted file mode 100755 index 931b40c045..0000000000 --- a/scripts/generate_config +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import shutil -import sys - -from synapse.config.homeserver import HomeServerConfig - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--config-dir", - default="CONFDIR", - help="The path where the config files are kept. Used to create filenames for " - "things like the log config and the signing key. Default: %(default)s", - ) - - parser.add_argument( - "--data-dir", - default="DATADIR", - help="The path where the data files are kept. Used to create filenames for " - "things like the database and media store. Default: %(default)s", - ) - - parser.add_argument( - "--server-name", - default="SERVERNAME", - help="The server name. Used to initialise the server_name config param, but also " - "used in the names of some of the config files. Default: %(default)s", - ) - - parser.add_argument( - "--report-stats", - action="store", - help="Whether the generated config reports anonymized usage statistics", - choices=["yes", "no"], - ) - - parser.add_argument( - "--generate-secrets", - action="store_true", - help="Enable generation of new secrets for things like the macaroon_secret_key." - "By default, these parameters will be left unset.", - ) - - parser.add_argument( - "-o", - "--output-file", - type=argparse.FileType("w"), - default=sys.stdout, - help="File to write the configuration to. Default: stdout", - ) - - parser.add_argument( - "--header-file", - type=argparse.FileType("r"), - help="File from which to read a header, which will be printed before the " - "generated config.", - ) - - args = parser.parse_args() - - report_stats = args.report_stats - if report_stats is not None: - report_stats = report_stats == "yes" - - conf = HomeServerConfig().generate_config( - config_dir_path=args.config_dir, - data_dir_path=args.data_dir, - server_name=args.server_name, - generate_secrets=args.generate_secrets, - report_stats=report_stats, - ) - - if args.header_file: - shutil.copyfileobj(args.header_file, args.output_file) - - args.output_file.write(conf) diff --git a/scripts/generate_log_config b/scripts/generate_log_config deleted file mode 100755 index e72a0dafb7..0000000000 --- a/scripts/generate_log_config +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2020 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import argparse -import sys - -from synapse.config.logger import DEFAULT_LOG_CONFIG - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "-o", - "--output-file", - type=argparse.FileType("w"), - default=sys.stdout, - help="File to write the configuration to. Default: stdout", - ) - - parser.add_argument( - "-f", - "--log-file", - type=str, - default="/var/log/matrix-synapse/homeserver.log", - help="name of the log file", - ) - - args = parser.parse_args() - out = args.output_file - out.write(DEFAULT_LOG_CONFIG.substitute(log_file=args.log_file)) - out.flush() diff --git a/scripts/generate_signing_key.py b/scripts/generate_signing_key.py deleted file mode 100755 index 07df25a809..0000000000 --- a/scripts/generate_signing_key.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import argparse -import sys - -from signedjson.key import generate_signing_key, write_signing_keys - -from synapse.util.stringutils import random_string - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "-o", - "--output_file", - type=argparse.FileType("w"), - default=sys.stdout, - help="Where to write the output to", - ) - args = parser.parse_args() - - key_id = "a_" + random_string(4) - key = (generate_signing_key(key_id),) - write_signing_keys(args.output_file, key) diff --git a/scripts/hash_password b/scripts/hash_password deleted file mode 100755 index 1d6fb0d700..0000000000 --- a/scripts/hash_password +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python - -import argparse -import getpass -import sys -import unicodedata - -import bcrypt -import yaml - -bcrypt_rounds = 12 -password_pepper = "" - - -def prompt_for_pass(): - password = getpass.getpass("Password: ") - - if not password: - raise Exception("Password cannot be blank.") - - confirm_password = getpass.getpass("Confirm password: ") - - if password != confirm_password: - raise Exception("Passwords do not match.") - - return password - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=( - "Calculate the hash of a new password, so that passwords can be reset" - ) - ) - parser.add_argument( - "-p", - "--password", - default=None, - help="New password for user. Will prompt if omitted.", - ) - parser.add_argument( - "-c", - "--config", - type=argparse.FileType("r"), - help=( - "Path to server config file. " - "Used to read in bcrypt_rounds and password_pepper." 
- ), - ) - - args = parser.parse_args() - if "config" in args and args.config: - config = yaml.safe_load(args.config) - bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds) - password_config = config.get("password_config", None) or {} - password_pepper = password_config.get("pepper", password_pepper) - password = args.password - - if not password: - password = prompt_for_pass() - - # On Python 2, make sure we decode it to Unicode before we normalise it - if isinstance(password, bytes): - try: - password = password.decode(sys.stdin.encoding) - except UnicodeDecodeError: - print( - "ERROR! Your password is not decodable using your terminal encoding (%s)." - % (sys.stdin.encoding,) - ) - - pw = unicodedata.normalize("NFKC", password) - - hashed = bcrypt.hashpw( - pw.encode("utf8") + password_pepper.encode("utf8"), - bcrypt.gensalt(bcrypt_rounds), - ).decode("ascii") - - print(hashed) diff --git a/scripts/move_remote_media_to_new_store.py b/scripts/move_remote_media_to_new_store.py deleted file mode 100755 index 875aa4781f..0000000000 --- a/scripts/move_remote_media_to_new_store.py +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env python -# Copyright 2017 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Moves a list of remote media from one media store to another. - -The input should be a list of media files to be moved, one per line. Each line -should be formatted:: - - | - -This can be extracted from postgres with:: - - psql --tuples-only -A -c "select media_origin, filesystem_id from - matrix.remote_media_cache where ..." - -To use, pipe the above into:: - - PYTHON_PATH=. 
./scripts/move_remote_media_to_new_store.py -""" - -import argparse -import logging -import os -import shutil -import sys - -from synapse.rest.media.v1.filepath import MediaFilePaths - -logger = logging.getLogger() - - -def main(src_repo, dest_repo): - src_paths = MediaFilePaths(src_repo) - dest_paths = MediaFilePaths(dest_repo) - for line in sys.stdin: - line = line.strip() - parts = line.split("|") - if len(parts) != 2: - print("Unable to parse input line %s" % line, file=sys.stderr) - sys.exit(1) - - move_media(parts[0], parts[1], src_paths, dest_paths) - - -def move_media(origin_server, file_id, src_paths, dest_paths): - """Move the given file, and any thumbnails, to the dest repo - - Args: - origin_server (str): - file_id (str): - src_paths (MediaFilePaths): - dest_paths (MediaFilePaths): - """ - logger.info("%s/%s", origin_server, file_id) - - # check that the original exists - original_file = src_paths.remote_media_filepath(origin_server, file_id) - if not os.path.exists(original_file): - logger.warning( - "Original for %s/%s (%s) does not exist", - origin_server, - file_id, - original_file, - ) - else: - mkdir_and_move( - original_file, dest_paths.remote_media_filepath(origin_server, file_id) - ) - - # now look for thumbnails - original_thumb_dir = src_paths.remote_media_thumbnail_dir(origin_server, file_id) - if not os.path.exists(original_thumb_dir): - return - - mkdir_and_move( - original_thumb_dir, - dest_paths.remote_media_thumbnail_dir(origin_server, file_id), - ) - - -def mkdir_and_move(original_file, dest_file): - dirname = os.path.dirname(dest_file) - if not os.path.exists(dirname): - logger.debug("mkdir %s", dirname) - os.makedirs(dirname) - logger.debug("mv %s %s", original_file, dest_file) - shutil.move(original_file, dest_file) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter - ) - parser.add_argument("-v", action="store_true", help="enable debug logging") - parser.add_argument("src_repo", help="Path to source content repo") - parser.add_argument("dest_repo", help="Path to source content repo") - args = parser.parse_args() - - logging_config = { - "level": logging.DEBUG if args.v else logging.INFO, - "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", - } - logging.basicConfig(**logging_config) - - main(args.src_repo, args.dest_repo) diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user deleted file mode 100755 index 00104b9d62..0000000000 --- a/scripts/register_new_matrix_user +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse._scripts.register_new_matrix_user import main - -if __name__ == "__main__": - main() diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db deleted file mode 100755 index db354b3c8c..0000000000 --- a/scripts/synapse_port_db +++ /dev/null @@ -1,1253 +0,0 @@ -#!/usr/bin/env python -# Copyright 2015, 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import curses -import logging -import sys -import time -import traceback -from typing import Dict, Iterable, Optional, Set - -import yaml -from matrix_common.versionstring import get_distribution_version_string - -from twisted.internet import defer, reactor - -from synapse.config.database import DatabaseConnectionConfig -from synapse.config.homeserver import HomeServerConfig -from synapse.logging.context import ( - LoggingContext, - make_deferred_yieldable, - run_in_background, -) -from synapse.storage.database import DatabasePool, make_conn -from synapse.storage.databases.main import PushRuleStore -from synapse.storage.databases.main.account_data import AccountDataWorkerStore -from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore -from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore -from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore -from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore -from synapse.storage.databases.main.events_bg_updates import ( - EventsBackgroundUpdatesStore, -) -from synapse.storage.databases.main.group_server import GroupServerWorkerStore -from synapse.storage.databases.main.media_repository import ( - MediaRepositoryBackgroundUpdateStore, -) -from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore -from synapse.storage.databases.main.pusher import PusherWorkerStore -from synapse.storage.databases.main.registration import ( - RegistrationBackgroundUpdateStore, - find_max_generated_user_id_localpart, -) -from synapse.storage.databases.main.room import RoomBackgroundUpdateStore -from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore -from synapse.storage.databases.main.search import SearchBackgroundUpdateStore -from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore -from synapse.storage.databases.main.stats import StatsStore -from synapse.storage.databases.main.user_directory import ( - UserDirectoryBackgroundUpdateStore, -) -from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore -from synapse.storage.engines import create_engine -from synapse.storage.prepare_database import prepare_database -from synapse.util import Clock - -logger = logging.getLogger("synapse_port_db") - - -BOOLEAN_COLUMNS = { - "events": ["processed", "outlier", "contains_url"], - "rooms": ["is_public", "has_auth_chain_index"], - "event_edges": ["is_state"], - "presence_list": 
["accepted"], - "presence_stream": ["currently_active"], - "public_room_list_stream": ["visibility"], - "devices": ["hidden"], - "device_lists_outbound_pokes": ["sent"], - "users_who_share_rooms": ["share_private"], - "groups": ["is_public"], - "group_rooms": ["is_public"], - "group_users": ["is_public", "is_admin"], - "group_summary_rooms": ["is_public"], - "group_room_categories": ["is_public"], - "group_summary_users": ["is_public"], - "group_roles": ["is_public"], - "local_group_membership": ["is_publicised", "is_admin"], - "e2e_room_keys": ["is_verified"], - "account_validity": ["email_sent"], - "redactions": ["have_censored"], - "room_stats_state": ["is_federatable"], - "local_media_repository": ["safe_from_quarantine"], - "users": ["shadow_banned"], - "e2e_fallback_keys_json": ["used"], - "access_tokens": ["used"], -} - - -APPEND_ONLY_TABLES = [ - "event_reference_hashes", - "events", - "event_json", - "state_events", - "room_memberships", - "topics", - "room_names", - "rooms", - "local_media_repository", - "local_media_repository_thumbnails", - "remote_media_cache", - "remote_media_cache_thumbnails", - "redactions", - "event_edges", - "event_auth", - "received_transactions", - "sent_transactions", - "transaction_id_to_pdu", - "users", - "state_groups", - "state_groups_state", - "event_to_state_groups", - "rejections", - "event_search", - "presence_stream", - "push_rules_stream", - "ex_outlier_stream", - "cache_invalidation_stream_by_instance", - "public_room_list_stream", - "state_group_edges", - "stream_ordering_to_exterm", -] - - -IGNORED_TABLES = { - # We don't port these tables, as they're a faff and we can regenerate - # them anyway. - "user_directory", - "user_directory_search", - "user_directory_search_content", - "user_directory_search_docsize", - "user_directory_search_segdir", - "user_directory_search_segments", - "user_directory_search_stat", - "user_directory_search_pos", - "users_who_share_private_rooms", - "users_in_public_room", - # UI auth sessions have foreign keys so additional care needs to be taken, - # the sessions are transient anyway, so ignore them. - "ui_auth_sessions", - "ui_auth_sessions_credentials", - "ui_auth_sessions_ips", -} - - -# Error returned by the run function. Used at the top-level part of the script to -# handle errors and return codes. -end_error = None # type: Optional[str] -# The exec_info for the error, if any. If error is defined but not exec_info the script -# will show only the error message without the stacktrace, if exec_info is defined but -# not the error then the script will show nothing outside of what's printed in the run -# function. If both are defined, the script will print both the error and the stacktrace. 
-end_error_exec_info = None - - -class Store( - ClientIpBackgroundUpdateStore, - DeviceInboxBackgroundUpdateStore, - DeviceBackgroundUpdateStore, - EventsBackgroundUpdatesStore, - MediaRepositoryBackgroundUpdateStore, - RegistrationBackgroundUpdateStore, - RoomBackgroundUpdateStore, - RoomMemberBackgroundUpdateStore, - SearchBackgroundUpdateStore, - StateBackgroundUpdateStore, - MainStateBackgroundUpdateStore, - UserDirectoryBackgroundUpdateStore, - EndToEndKeyBackgroundStore, - StatsStore, - AccountDataWorkerStore, - PushRuleStore, - PusherWorkerStore, - PresenceBackgroundUpdateStore, - GroupServerWorkerStore, -): - def execute(self, f, *args, **kwargs): - return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) - - def execute_sql(self, sql, *args): - def r(txn): - txn.execute(sql, args) - return txn.fetchall() - - return self.db_pool.runInteraction("execute_sql", r) - - def insert_many_txn(self, txn, table, headers, rows): - sql = "INSERT INTO %s (%s) VALUES (%s)" % ( - table, - ", ".join(k for k in headers), - ", ".join("%s" for _ in headers), - ) - - try: - txn.executemany(sql, rows) - except Exception: - logger.exception("Failed to insert: %s", table) - raise - - def set_room_is_public(self, room_id, is_public): - raise Exception( - "Attempt to set room_is_public during port_db: database not empty?" - ) - - -class MockHomeserver: - def __init__(self, config): - self.clock = Clock(reactor) - self.config = config - self.hostname = config.server.server_name - self.version_string = "Synapse/" + get_distribution_version_string( - "matrix-synapse" - ) - - def get_clock(self): - return self.clock - - def get_reactor(self): - return reactor - - def get_instance_name(self): - return "master" - - -class Porter(object): - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - - async def setup_table(self, table): - if table in APPEND_ONLY_TABLES: - # It's safe to just carry on inserting. 
- row = await self.postgres_store.db_pool.simple_select_one( - table="port_from_sqlite3", - keyvalues={"table_name": table}, - retcols=("forward_rowid", "backward_rowid"), - allow_none=True, - ) - - total_to_port = None - if row is None: - if table == "sent_transactions": - ( - forward_chunk, - already_ported, - total_to_port, - ) = await self._setup_sent_transactions() - backward_chunk = 0 - else: - await self.postgres_store.db_pool.simple_insert( - table="port_from_sqlite3", - values={ - "table_name": table, - "forward_rowid": 1, - "backward_rowid": 0, - }, - ) - - forward_chunk = 1 - backward_chunk = 0 - already_ported = 0 - else: - forward_chunk = row["forward_rowid"] - backward_chunk = row["backward_rowid"] - - if total_to_port is None: - already_ported, total_to_port = await self._get_total_count_to_port( - table, forward_chunk, backward_chunk - ) - else: - - def delete_all(txn): - txn.execute( - "DELETE FROM port_from_sqlite3 WHERE table_name = %s", (table,) - ) - txn.execute("TRUNCATE %s CASCADE" % (table,)) - - await self.postgres_store.execute(delete_all) - - await self.postgres_store.db_pool.simple_insert( - table="port_from_sqlite3", - values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0}, - ) - - forward_chunk = 1 - backward_chunk = 0 - - already_ported, total_to_port = await self._get_total_count_to_port( - table, forward_chunk, backward_chunk - ) - - return table, already_ported, total_to_port, forward_chunk, backward_chunk - - async def get_table_constraints(self) -> Dict[str, Set[str]]: - """Returns a map of tables that have foreign key constraints to tables they depend on.""" - - def _get_constraints(txn): - # We can pull the information about foreign key constraints out from - # the postgres schema tables. - sql = """ - SELECT DISTINCT - tc.table_name, - ccu.table_name AS foreign_table_name - FROM - information_schema.table_constraints AS tc - INNER JOIN information_schema.constraint_column_usage AS ccu - USING (table_schema, constraint_name) - WHERE tc.constraint_type = 'FOREIGN KEY' - AND tc.table_name != ccu.table_name; - """ - txn.execute(sql) - - results = {} - for table, foreign_table in txn: - results.setdefault(table, set()).add(foreign_table) - return results - - return await self.postgres_store.db_pool.runInteraction( - "get_table_constraints", _get_constraints - ) - - async def handle_table( - self, table, postgres_size, table_size, forward_chunk, backward_chunk - ): - logger.info( - "Table %s: %i/%i (rows %i-%i) already ported", - table, - postgres_size, - table_size, - backward_chunk + 1, - forward_chunk - 1, - ) - - if not table_size: - return - - self.progress.add_table(table, postgres_size, table_size) - - if table == "event_search": - await self.handle_search_table( - postgres_size, table_size, forward_chunk, backward_chunk - ) - return - - if table in IGNORED_TABLES: - self.progress.update(table, table_size) # Mark table as done - return - - if table == "user_directory_stream_pos": - # We need to make sure there is a single row, `(X, null), as that is - # what synapse expects to be there. - await self.postgres_store.db_pool.simple_insert( - table=table, values={"stream_id": None} - ) - self.progress.update(table, table_size) # Mark table as done - return - - forward_select = ( - "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,) - ) - - backward_select = ( - "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" 
% (table,) - ) - - do_forward = [True] - do_backward = [True] - - while True: - - def r(txn): - forward_rows = [] - backward_rows = [] - if do_forward[0]: - txn.execute(forward_select, (forward_chunk, self.batch_size)) - forward_rows = txn.fetchall() - if not forward_rows: - do_forward[0] = False - - if do_backward[0]: - txn.execute(backward_select, (backward_chunk, self.batch_size)) - backward_rows = txn.fetchall() - if not backward_rows: - do_backward[0] = False - - if forward_rows or backward_rows: - headers = [column[0] for column in txn.description] - else: - headers = None - - return headers, forward_rows, backward_rows - - headers, frows, brows = await self.sqlite_store.db_pool.runInteraction( - "select", r - ) - - if frows or brows: - if frows: - forward_chunk = max(row[0] for row in frows) + 1 - if brows: - backward_chunk = min(row[0] for row in brows) - 1 - - rows = frows + brows - rows = self._convert_rows(table, headers, rows) - - def insert(txn): - self.postgres_store.insert_many_txn(txn, table, headers[1:], rows) - - self.postgres_store.db_pool.simple_update_one_txn( - txn, - table="port_from_sqlite3", - keyvalues={"table_name": table}, - updatevalues={ - "forward_rowid": forward_chunk, - "backward_rowid": backward_chunk, - }, - ) - - await self.postgres_store.execute(insert) - - postgres_size += len(rows) - - self.progress.update(table, postgres_size) - else: - return - - async def handle_search_table( - self, postgres_size, table_size, forward_chunk, backward_chunk - ): - select = ( - "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering" - " FROM event_search as es" - " INNER JOIN events AS e USING (event_id, room_id)" - " WHERE es.rowid >= ?" - " ORDER BY es.rowid LIMIT ?" - ) - - while True: - - def r(txn): - txn.execute(select, (forward_chunk, self.batch_size)) - rows = txn.fetchall() - headers = [column[0] for column in txn.description] - - return headers, rows - - headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r) - - if rows: - forward_chunk = rows[-1][0] + 1 - - # We have to treat event_search differently since it has a - # different structure in the two different databases. - def insert(txn): - sql = ( - "INSERT INTO event_search (event_id, room_id, key," - " sender, vector, origin_server_ts, stream_ordering)" - " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)" - ) - - rows_dict = [] - for row in rows: - d = dict(zip(headers, row)) - if "\0" in d["value"]: - logger.warning("dropping search row %s", d) - else: - rows_dict.append(d) - - txn.executemany( - sql, - [ - ( - row["event_id"], - row["room_id"], - row["key"], - row["sender"], - row["value"], - row["origin_server_ts"], - row["stream_ordering"], - ) - for row in rows_dict - ], - ) - - self.postgres_store.db_pool.simple_update_one_txn( - txn, - table="port_from_sqlite3", - keyvalues={"table_name": "event_search"}, - updatevalues={ - "forward_rowid": forward_chunk, - "backward_rowid": backward_chunk, - }, - ) - - await self.postgres_store.execute(insert) - - postgres_size += len(rows) - - self.progress.update("event_search", postgres_size) - - else: - return - - def build_db_store( - self, - db_config: DatabaseConnectionConfig, - allow_outdated_version: bool = False, - ): - """Builds and returns a database store using the provided configuration. - - Args: - db_config: The database configuration - allow_outdated_version: True to suppress errors about the database server - version being too old to run a complete synapse - - Returns: - The built Store object. 
- """ - self.progress.set_state("Preparing %s" % db_config.config["name"]) - - engine = create_engine(db_config.config) - - hs = MockHomeserver(self.hs_config) - - with make_conn(db_config, engine, "portdb") as db_conn: - engine.check_database( - db_conn, allow_outdated_version=allow_outdated_version - ) - prepare_database(db_conn, engine, config=self.hs_config) - store = Store(DatabasePool(hs, db_config, engine), db_conn, hs) - db_conn.commit() - - return store - - async def run_background_updates_on_postgres(self): - # Manually apply all background updates on the PostgreSQL database. - postgres_ready = ( - await self.postgres_store.db_pool.updates.has_completed_background_updates() - ) - - if not postgres_ready: - # Only say that we're running background updates when there are background - # updates to run. - self.progress.set_state("Running background updates on PostgreSQL") - - while not postgres_ready: - await self.postgres_store.db_pool.updates.do_next_background_update(100) - postgres_ready = await ( - self.postgres_store.db_pool.updates.has_completed_background_updates() - ) - - async def run(self): - """Ports the SQLite database to a PostgreSQL database. - - When a fatal error is met, its message is assigned to the global "end_error" - variable. When this error comes with a stacktrace, its exec_info is assigned to - the global "end_error_exec_info" variable. - """ - global end_error - - try: - # we allow people to port away from outdated versions of sqlite. - self.sqlite_store = self.build_db_store( - DatabaseConnectionConfig("master-sqlite", self.sqlite_config), - allow_outdated_version=True, - ) - - # Check if all background updates are done, abort if not. - updates_complete = ( - await self.sqlite_store.db_pool.updates.has_completed_background_updates() - ) - if not updates_complete: - end_error = ( - "Pending background updates exist in the SQLite3 database." - " Please start Synapse again and wait until every update has finished" - " before running this script.\n" - ) - return - - self.postgres_store = self.build_db_store( - self.hs_config.database.get_single_database() - ) - - await self.run_background_updates_on_postgres() - - self.progress.set_state("Creating port tables") - - def create_port_table(txn): - txn.execute( - "CREATE TABLE IF NOT EXISTS port_from_sqlite3 (" - " table_name varchar(100) NOT NULL UNIQUE," - " forward_rowid bigint NOT NULL," - " backward_rowid bigint NOT NULL" - ")" - ) - - # The old port script created a table with just a "rowid" column. - # We want people to be able to rerun this script from an old port - # so that they can pick up any missing events that were not - # ported across. - def alter_table(txn): - txn.execute( - "ALTER TABLE IF EXISTS port_from_sqlite3" - " RENAME rowid TO forward_rowid" - ) - txn.execute( - "ALTER TABLE IF EXISTS port_from_sqlite3" - " ADD backward_rowid bigint NOT NULL DEFAULT 0" - ) - - try: - await self.postgres_store.db_pool.runInteraction( - "alter_table", alter_table - ) - except Exception: - # On Error Resume Next - pass - - await self.postgres_store.db_pool.runInteraction( - "create_port_table", create_port_table - ) - - # Step 2. Set up sequences - # - # We do this before porting the tables so that event if we fail half - # way through the postgres DB always have sequences that are greater - # than their respective tables. If we don't then creating the - # `DataStore` object will fail due to the inconsistency. 
- self.progress.set_state("Setting up sequence generators") - await self._setup_state_group_id_seq() - await self._setup_user_id_seq() - await self._setup_events_stream_seqs() - await self._setup_sequence( - "device_inbox_sequence", ("device_inbox", "device_federation_outbox") - ) - await self._setup_sequence( - "account_data_sequence", - ("room_account_data", "room_tags_revisions", "account_data"), - ) - await self._setup_sequence("receipts_sequence", ("receipts_linearized",)) - await self._setup_sequence("presence_stream_sequence", ("presence_stream",)) - await self._setup_auth_chain_sequence() - - # Step 3. Get tables. - self.progress.set_state("Fetching tables") - sqlite_tables = await self.sqlite_store.db_pool.simple_select_onecol( - table="sqlite_master", keyvalues={"type": "table"}, retcol="name" - ) - - postgres_tables = await self.postgres_store.db_pool.simple_select_onecol( - table="information_schema.tables", - keyvalues={}, - retcol="distinct table_name", - ) - - tables = set(sqlite_tables) & set(postgres_tables) - logger.info("Found %d tables", len(tables)) - - # Step 4. Figure out what still needs copying - self.progress.set_state("Checking on port progress") - setup_res = await make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background(self.setup_table, table) - for table in tables - if table not in ["schema_version", "applied_schema_deltas"] - and not table.startswith("sqlite_") - ], - consumeErrors=True, - ) - ) - # Map from table name to args passed to `handle_table`, i.e. a tuple - # of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`. - tables_to_port_info_map = {r[0]: r[1:] for r in setup_res} - - # Step 5. Do the copying. - # - # This is slightly convoluted as we need to ensure tables are ported - # in the correct order due to foreign key constraints. - self.progress.set_state("Copying to postgres") - - constraints = await self.get_table_constraints() - tables_ported = set() # type: Set[str] - - while tables_to_port_info_map: - # Pulls out all tables that are still to be ported and which - # only depend on tables that are already ported (if any). 
- tables_to_port = [ - table - for table in tables_to_port_info_map - if not constraints.get(table, set()) - tables_ported - ] - - await make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.handle_table, - table, - *tables_to_port_info_map.pop(table), - ) - for table in tables_to_port - ], - consumeErrors=True, - ) - ) - - tables_ported.update(tables_to_port) - - self.progress.done() - except Exception as e: - global end_error_exec_info - end_error = str(e) - end_error_exec_info = sys.exc_info() - logger.exception("") - finally: - reactor.stop() - - def _convert_rows(self, table, headers, rows): - bool_col_names = BOOLEAN_COLUMNS.get(table, []) - - bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names] - - class BadValueException(Exception): - pass - - def conv(j, col): - if j in bool_cols: - return bool(col) - if isinstance(col, bytes): - return bytearray(col) - elif isinstance(col, str) and "\0" in col: - logger.warning( - "DROPPING ROW: NUL value in table %s col %s: %r", - table, - headers[j], - col, - ) - raise BadValueException() - return col - - outrows = [] - for row in rows: - try: - outrows.append( - tuple(conv(j, col) for j, col in enumerate(row) if j > 0) - ) - except BadValueException: - pass - - return outrows - - async def _setup_sent_transactions(self): - # Only save things from the last day - yesterday = int(time.time() * 1000) - 86400000 - - # And save the max transaction id from each destination - select = ( - "SELECT rowid, * FROM sent_transactions WHERE rowid IN (" - "SELECT max(rowid) FROM sent_transactions" - " GROUP BY destination" - ")" - ) - - def r(txn): - txn.execute(select) - rows = txn.fetchall() - headers = [column[0] for column in txn.description] - - ts_ind = headers.index("ts") - - return headers, [r for r in rows if r[ts_ind] < yesterday] - - headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r) - - rows = self._convert_rows("sent_transactions", headers, rows) - - inserted_rows = len(rows) - if inserted_rows: - max_inserted_rowid = max(r[0] for r in rows) - - def insert(txn): - self.postgres_store.insert_many_txn( - txn, "sent_transactions", headers[1:], rows - ) - - await self.postgres_store.execute(insert) - else: - max_inserted_rowid = 0 - - def get_start_id(txn): - txn.execute( - "SELECT rowid FROM sent_transactions WHERE ts >= ?" - " ORDER BY rowid ASC LIMIT 1", - (yesterday,), - ) - - rows = txn.fetchall() - if rows: - return rows[0][0] - else: - return 1 - - next_chunk = await self.sqlite_store.execute(get_start_id) - next_chunk = max(max_inserted_rowid + 1, next_chunk) - - await self.postgres_store.db_pool.simple_insert( - table="port_from_sqlite3", - values={ - "table_name": "sent_transactions", - "forward_rowid": next_chunk, - "backward_rowid": 0, - }, - ) - - def get_sent_table_size(txn): - txn.execute( - "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,) - ) - (size,) = txn.fetchone() - return int(size) - - remaining_count = await self.sqlite_store.execute(get_sent_table_size) - - total_count = remaining_count + inserted_rows - - return next_chunk, inserted_rows, total_count - - async def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk): - frows = await self.sqlite_store.execute_sql( - "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk - ) - - brows = await self.sqlite_store.execute_sql( - "SELECT count(*) FROM %s WHERE rowid <= ?" 
% (table,), backward_chunk - ) - - return frows[0][0] + brows[0][0] - - async def _get_already_ported_count(self, table): - rows = await self.postgres_store.execute_sql( - "SELECT count(*) FROM %s" % (table,) - ) - - return rows[0][0] - - async def _get_total_count_to_port(self, table, forward_chunk, backward_chunk): - remaining, done = await make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self._get_remaining_count_to_port, - table, - forward_chunk, - backward_chunk, - ), - run_in_background(self._get_already_ported_count, table), - ], - ) - ) - - remaining = int(remaining) if remaining else 0 - done = int(done) if done else 0 - - return done, remaining + done - - async def _setup_state_group_id_seq(self) -> None: - curr_id = await self.sqlite_store.db_pool.simple_select_one_onecol( - table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True - ) - - if not curr_id: - return - - def r(txn): - next_id = curr_id + 1 - txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,)) - - await self.postgres_store.db_pool.runInteraction("setup_state_group_id_seq", r) - - async def _setup_user_id_seq(self) -> None: - curr_id = await self.sqlite_store.db_pool.runInteraction( - "setup_user_id_seq", find_max_generated_user_id_localpart - ) - - def r(txn): - next_id = curr_id + 1 - txn.execute("ALTER SEQUENCE user_id_seq RESTART WITH %s", (next_id,)) - - await self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r) - - async def _setup_events_stream_seqs(self) -> None: - """Set the event stream sequences to the correct values.""" - - # We get called before we've ported the events table, so we need to - # fetch the current positions from the SQLite store. - curr_forward_id = await self.sqlite_store.db_pool.simple_select_one_onecol( - table="events", keyvalues={}, retcol="MAX(stream_ordering)", allow_none=True - ) - - curr_backward_id = await self.sqlite_store.db_pool.simple_select_one_onecol( - table="events", - keyvalues={}, - retcol="MAX(-MIN(stream_ordering), 1)", - allow_none=True, - ) - - def _setup_events_stream_seqs_set_pos(txn): - if curr_forward_id: - txn.execute( - "ALTER SEQUENCE events_stream_seq RESTART WITH %s", - (curr_forward_id + 1,), - ) - - if curr_backward_id: - txn.execute( - "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", - (curr_backward_id + 1,), - ) - - await self.postgres_store.db_pool.runInteraction( - "_setup_events_stream_seqs", - _setup_events_stream_seqs_set_pos, - ) - - async def _setup_sequence( - self, sequence_name: str, stream_id_tables: Iterable[str] - ) -> None: - """Set a sequence to the correct value.""" - current_stream_ids = [] - for stream_id_table in stream_id_tables: - max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol( - table=stream_id_table, - keyvalues={}, - retcol="COALESCE(MAX(stream_id), 1)", - allow_none=True, - ) - current_stream_ids.append(max_stream_id) - - next_id = max(current_stream_ids) + 1 - - def r(txn): - sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name,) - txn.execute(sql + " %s", (next_id,)) - - await self.postgres_store.db_pool.runInteraction( - "_setup_%s" % (sequence_name,), r - ) - - async def _setup_auth_chain_sequence(self) -> None: - curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol( - table="event_auth_chains", - keyvalues={}, - retcol="MAX(chain_id)", - allow_none=True, - ) - - def r(txn): - txn.execute( - "ALTER SEQUENCE event_auth_chain_id RESTART WITH %s", - (curr_chain_id + 1,), - ) - - if 
curr_chain_id is not None: - await self.postgres_store.db_pool.runInteraction( - "_setup_event_auth_chain_id", - r, - ) - - -############################################## -# The following is simply UI stuff -############################################## - - -class Progress(object): - """Used to report progress of the port""" - - def __init__(self): - self.tables = {} - - self.start_time = int(time.time()) - - def add_table(self, table, cur, size): - self.tables[table] = { - "start": cur, - "num_done": cur, - "total": size, - "perc": int(cur * 100 / size), - } - - def update(self, table, num_done): - data = self.tables[table] - data["num_done"] = num_done - data["perc"] = int(num_done * 100 / data["total"]) - - def done(self): - pass - - -class CursesProgress(Progress): - """Reports progress to a curses window""" - - def __init__(self, stdscr): - self.stdscr = stdscr - - curses.use_default_colors() - curses.curs_set(0) - - curses.init_pair(1, curses.COLOR_RED, -1) - curses.init_pair(2, curses.COLOR_GREEN, -1) - - self.last_update = 0 - - self.finished = False - - self.total_processed = 0 - self.total_remaining = 0 - - super(CursesProgress, self).__init__() - - def update(self, table, num_done): - super(CursesProgress, self).update(table, num_done) - - self.total_processed = 0 - self.total_remaining = 0 - for data in self.tables.values(): - self.total_processed += data["num_done"] - data["start"] - self.total_remaining += data["total"] - data["num_done"] - - self.render() - - def render(self, force=False): - now = time.time() - - if not force and now - self.last_update < 0.2: - # reactor.callLater(1, self.render) - return - - self.stdscr.clear() - - rows, cols = self.stdscr.getmaxyx() - - duration = int(now) - int(self.start_time) - - minutes, seconds = divmod(duration, 60) - duration_str = "%02dm %02ds" % (minutes, seconds) - - if self.finished: - status = "Time spent: %s (Done!)" % (duration_str,) - else: - - if self.total_processed > 0: - left = float(self.total_remaining) / self.total_processed - - est_remaining = (int(now) - self.start_time) * left - est_remaining_str = "%02dm %02ds remaining" % divmod(est_remaining, 60) - else: - est_remaining_str = "Unknown" - status = "Time spent: %s (est. 
remaining: %s)" % ( - duration_str, - est_remaining_str, - ) - - self.stdscr.addstr(0, 0, status, curses.A_BOLD) - - max_len = max(len(t) for t in self.tables.keys()) - - left_margin = 5 - middle_space = 1 - - items = self.tables.items() - items = sorted(items, key=lambda i: (i[1]["perc"], i[0])) - - for i, (table, data) in enumerate(items): - if i + 2 >= rows: - break - - perc = data["perc"] - - color = curses.color_pair(2) if perc == 100 else curses.color_pair(1) - - self.stdscr.addstr( - i + 2, left_margin + max_len - len(table), table, curses.A_BOLD | color - ) - - size = 20 - - progress = "[%s%s]" % ( - "#" * int(perc * size / 100), - " " * (size - int(perc * size / 100)), - ) - - self.stdscr.addstr( - i + 2, - left_margin + max_len + middle_space, - "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]), - ) - - if self.finished: - self.stdscr.addstr(rows - 1, 0, "Press any key to exit...") - - self.stdscr.refresh() - self.last_update = time.time() - - def done(self): - self.finished = True - self.render(True) - self.stdscr.getch() - - def set_state(self, state): - self.stdscr.clear() - self.stdscr.addstr(0, 0, state + "...", curses.A_BOLD) - self.stdscr.refresh() - - -class TerminalProgress(Progress): - """Just prints progress to the terminal""" - - def update(self, table, num_done): - super(TerminalProgress, self).update(table, num_done) - - data = self.tables[table] - - print( - "%s: %d%% (%d/%d)" % (table, data["perc"], data["num_done"], data["total"]) - ) - - def set_state(self, state): - print(state + "...") - - -############################################## -############################################## - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="A script to port an existing synapse SQLite database to" - " a new PostgreSQL database." - ) - parser.add_argument("-v", action="store_true") - parser.add_argument( - "--sqlite-database", - required=True, - help="The snapshot of the SQLite database file. 
This must not be" - " currently used by a running synapse server", - ) - parser.add_argument( - "--postgres-config", - type=argparse.FileType("r"), - required=True, - help="The database config file for the PostgreSQL database", - ) - parser.add_argument( - "--curses", action="store_true", help="display a curses based progress UI" - ) - - parser.add_argument( - "--batch-size", - type=int, - default=1000, - help="The number of rows to select from the SQLite table each" - " iteration [default=1000]", - ) - - args = parser.parse_args() - - logging_config = { - "level": logging.DEBUG if args.v else logging.INFO, - "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", - } - - if args.curses: - logging_config["filename"] = "port-synapse.log" - - logging.basicConfig(**logging_config) - - sqlite_config = { - "name": "sqlite3", - "args": { - "database": args.sqlite_database, - "cp_min": 1, - "cp_max": 1, - "check_same_thread": False, - }, - } - - hs_config = yaml.safe_load(args.postgres_config) - - if "database" not in hs_config: - sys.stderr.write("The configuration file must have a 'database' section.\n") - sys.exit(4) - - postgres_config = hs_config["database"] - - if "name" not in postgres_config: - sys.stderr.write("Malformed database config: no 'name'\n") - sys.exit(2) - if postgres_config["name"] != "psycopg2": - sys.stderr.write("Database must use the 'psycopg2' connector.\n") - sys.exit(3) - - config = HomeServerConfig() - config.parse_config_dict(hs_config, "", "") - - def start(stdscr=None): - if stdscr: - progress = CursesProgress(stdscr) - else: - progress = TerminalProgress() - - porter = Porter( - sqlite_config=sqlite_config, - progress=progress, - batch_size=args.batch_size, - hs_config=config, - ) - - @defer.inlineCallbacks - def run(): - with LoggingContext("synapse_port_db_run"): - yield defer.ensureDeferred(porter.run()) - - reactor.callWhenRunning(run) - - reactor.run() - - if args.curses: - curses.wrapper(start) - else: - start() - - if end_error: - if end_error_exec_info: - exc_type, exc_value, exc_traceback = end_error_exec_info - traceback.print_exception(exc_type, exc_value, exc_traceback) - - sys.stderr.write(end_error) - - sys.exit(5) diff --git a/scripts/synapse_review_recent_signups b/scripts/synapse_review_recent_signups deleted file mode 100755 index a36d46e14c..0000000000 --- a/scripts/synapse_review_recent_signups +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# Copyright 2021 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse._scripts.review_recent_signups import main - -if __name__ == "__main__": - main() diff --git a/scripts/sync_room_to_group.pl b/scripts/sync_room_to_group.pl deleted file mode 100755 index f0c2dfadfa..0000000000 --- a/scripts/sync_room_to_group.pl +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env perl - -use strict; -use warnings; - -use JSON::XS; -use LWP::UserAgent; -use URI::Escape; - -if (@ARGV < 4) { - die "usage: $0 \n"; -} - -my ($hs, $access_token, $room_id, $group_id) = @ARGV; -my $ua = LWP::UserAgent->new(); -$ua->timeout(10); - -if ($room_id =~ /^#/) { - $room_id = uri_escape($room_id); - $room_id = decode_json($ua->get("${hs}/_matrix/client/r0/directory/room/${room_id}?access_token=${access_token}")->decoded_content)->{room_id}; -} - -my $room_users = [ keys %{decode_json($ua->get("${hs}/_matrix/client/r0/rooms/${room_id}/joined_members?access_token=${access_token}")->decoded_content)->{joined}} ]; -my $group_users = [ - (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/users?access_token=${access_token}" )->decoded_content)->{chunk}}), - (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/invited_users?access_token=${access_token}" )->decoded_content)->{chunk}}), -]; - -die "refusing to sync from empty room" unless (@$room_users); -die "refusing to sync to empty group" unless (@$group_users); - -my $diff = {}; -foreach my $user (@$room_users) { $diff->{$user}++ } -foreach my $user (@$group_users) { $diff->{$user}-- } - -foreach my $user (keys %$diff) { - if ($diff->{$user} == 1) { - warn "inviting $user"; - print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/invite/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n"; - } - elsif ($diff->{$user} == -1) { - warn "removing $user"; - print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/remove/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n"; - } -} diff --git a/scripts/update_synapse_database b/scripts/update_synapse_database deleted file mode 100755 index f43676afaa..0000000000 --- a/scripts/update_synapse_database +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import argparse -import logging -import sys - -import yaml -from matrix_common.versionstring import get_distribution_version_string - -from twisted.internet import defer, reactor - -from synapse.config.homeserver import HomeServerConfig -from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.server import HomeServer -from synapse.storage import DataStore - -logger = logging.getLogger("update_database") - - -class MockHomeserver(HomeServer): - DATASTORE_CLASS = DataStore - - def __init__(self, config, **kwargs): - super(MockHomeserver, self).__init__( - config.server.server_name, reactor=reactor, config=config, **kwargs - ) - - self.version_string = "Synapse/" + get_distribution_version_string( - "matrix-synapse" - ) - - -def run_background_updates(hs): - store = hs.get_datastores().main - - async def run_background_updates(): - await store.db_pool.updates.run_background_updates(sleep=False) - # Stop the reactor to exit the script once every background update is run. - reactor.stop() - - def run(): - # Apply all background updates on the database. - defer.ensureDeferred( - run_as_background_process("background_updates", run_background_updates) - ) - - reactor.callWhenRunning(run) - - reactor.run() - - -def main(): - parser = argparse.ArgumentParser( - description=( - "Updates a synapse database to the latest schema and optionally runs background updates" - " on it." - ) - ) - parser.add_argument("-v", action="store_true") - parser.add_argument( - "--database-config", - type=argparse.FileType("r"), - required=True, - help="Synapse configuration file, giving the details of the database to be updated", - ) - parser.add_argument( - "--run-background-updates", - action="store_true", - required=False, - help="run background updates after upgrading the database schema", - ) - - args = parser.parse_args() - - logging_config = { - "level": logging.DEBUG if args.v else logging.INFO, - "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", - } - - logging.basicConfig(**logging_config) - - # Load, process and sanity-check the config. - hs_config = yaml.safe_load(args.database_config) - - if "database" not in hs_config: - sys.stderr.write("The configuration file must have a 'database' section.\n") - sys.exit(4) - - config = HomeServerConfig() - config.parse_config_dict(hs_config, "", "") - - # Instantiate and initialise the homeserver object. - hs = MockHomeserver(config) - - # Setup instantiates the store within the homeserver object and updates the - # DB. - hs.setup() - - if args.run_background_updates: - run_background_updates(hs) - - -if __name__ == "__main__": - main() diff --git a/setup.py b/setup.py index 26f4650348..318df16766 100755 --- a/setup.py +++ b/setup.py @@ -15,7 +15,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import glob import os from typing import Any, Dict @@ -153,8 +152,19 @@ setup( python_requires="~=3.7", entry_points={ "console_scripts": [ + # Application "synapse_homeserver = synapse.app.homeserver:main", "synapse_worker = synapse.app.generic_worker:main", + # Scripts + "export_signing_key = synapse._scripts.export_signing_key:main", + "generate_config = synapse._scripts.generate_config:main", + "generate_log_config = synapse._scripts.generate_log_config:main", + "generate_signing_key = synapse._scripts.generate_signing_key:main", + "hash_password = synapse._scripts.hash_password:main", + "register_new_matrix_user = synapse._scripts.register_new_matrix_user:main", + "synapse_port_db = synapse._scripts.synapse_port_db:main", + "synapse_review_recent_signups = synapse._scripts.review_recent_signups:main", + "update_synapse_database = synapse._scripts.update_synapse_database:main", ] }, classifiers=[ @@ -167,6 +177,6 @@ setup( "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", ], - scripts=["synctl"] + glob.glob("scripts/*"), + scripts=["synctl"], cmdclass={"test": TestCommand}, ) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 9a01152c15..dd4c8478d5 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -20,7 +20,7 @@ apps: generate-config: command: generate_config generate-signing-key: - command: generate_signing_key.py + command: generate_signing_key register-new-matrix-user: command: register_new_matrix_user plugs: [network] diff --git a/synapse/_scripts/export_signing_key.py b/synapse/_scripts/export_signing_key.py new file mode 100755 index 0000000000..3d254348f1 --- /dev/null +++ b/synapse/_scripts/export_signing_key.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
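As a point of reference for the console_scripts hunk above: setuptools turns each "name = module:func" entry into a small wrapper executable that imports the named module and calls the named function. The sketch below is an approximation of what the generated `hash_password` wrapper amounts to, not the literal generated code; the module and function names are taken from the entry point added above.

# Rough equivalent of the wrapper setuptools generates for
# "hash_password = synapse._scripts.hash_password:main".
import sys

from synapse._scripts.hash_password import main

if __name__ == "__main__":
    # The generated wrapper exits with whatever main() returns (None -> 0).
    sys.exit(main())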
+import argparse +import sys +import time +from typing import Optional + +import nacl.signing +from signedjson.key import encode_verify_key_base64, get_verify_key, read_signing_keys + + +def exit(status: int = 0, message: Optional[str] = None): + if message: + print(message, file=sys.stderr) + sys.exit(status) + + +def format_plain(public_key: nacl.signing.VerifyKey): + print( + "%s:%s %s" + % ( + public_key.alg, + public_key.version, + encode_verify_key_base64(public_key), + ) + ) + + +def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int): + print( + ' "%s:%s": { key: "%s", expired_ts: %i }' + % ( + public_key.alg, + public_key.version, + encode_verify_key_base64(public_key), + expiry_ts, + ) + ) + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "key_file", + nargs="+", + type=argparse.FileType("r"), + help="The key file to read", + ) + + parser.add_argument( + "-x", + action="store_true", + dest="for_config", + help="format the output for inclusion in the old_signing_keys config setting", + ) + + parser.add_argument( + "--expiry-ts", + type=int, + default=int(time.time() * 1000) + 6 * 3600000, + help=( + "The expiry time to use for -x, in milliseconds since 1970. The default " + "is (now+6h)." + ), + ) + + args = parser.parse_args() + + formatter = ( + (lambda k: format_for_config(k, args.expiry_ts)) + if args.for_config + else format_plain + ) + + for file in args.key_file: + try: + res = read_signing_keys(file) + except Exception as e: + exit( + status=1, + message="Error reading key from file %s: %s %s" + % (file.name, type(e), e), + ) + res = [] + for key in res: + formatter(get_verify_key(key)) + + +if __name__ == "__main__": + main() diff --git a/synapse/_scripts/generate_config.py b/synapse/_scripts/generate_config.py new file mode 100755 index 0000000000..75fce20b12 --- /dev/null +++ b/synapse/_scripts/generate_config.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 + +import argparse +import shutil +import sys + +from synapse.config.homeserver import HomeServerConfig + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--config-dir", + default="CONFDIR", + help="The path where the config files are kept. Used to create filenames for " + "things like the log config and the signing key. Default: %(default)s", + ) + + parser.add_argument( + "--data-dir", + default="DATADIR", + help="The path where the data files are kept. Used to create filenames for " + "things like the database and media store. Default: %(default)s", + ) + + parser.add_argument( + "--server-name", + default="SERVERNAME", + help="The server name. Used to initialise the server_name config param, but also " + "used in the names of some of the config files. Default: %(default)s", + ) + + parser.add_argument( + "--report-stats", + action="store", + help="Whether the generated config reports anonymized usage statistics", + choices=["yes", "no"], + ) + + parser.add_argument( + "--generate-secrets", + action="store_true", + help="Enable generation of new secrets for things like the macaroon_secret_key." + "By default, these parameters will be left unset.", + ) + + parser.add_argument( + "-o", + "--output-file", + type=argparse.FileType("w"), + default=sys.stdout, + help="File to write the configuration to. 
Default: stdout", + ) + + parser.add_argument( + "--header-file", + type=argparse.FileType("r"), + help="File from which to read a header, which will be printed before the " + "generated config.", + ) + + args = parser.parse_args() + + report_stats = args.report_stats + if report_stats is not None: + report_stats = report_stats == "yes" + + conf = HomeServerConfig().generate_config( + config_dir_path=args.config_dir, + data_dir_path=args.data_dir, + server_name=args.server_name, + generate_secrets=args.generate_secrets, + report_stats=report_stats, + ) + + if args.header_file: + shutil.copyfileobj(args.header_file, args.output_file) + + args.output_file.write(conf) + + +if __name__ == "__main__": + main() diff --git a/synapse/_scripts/generate_log_config.py b/synapse/_scripts/generate_log_config.py new file mode 100755 index 0000000000..82fc763140 --- /dev/null +++ b/synapse/_scripts/generate_log_config.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 + +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import sys + +from synapse.config.logger import DEFAULT_LOG_CONFIG + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "-o", + "--output-file", + type=argparse.FileType("w"), + default=sys.stdout, + help="File to write the configuration to. Default: stdout", + ) + + parser.add_argument( + "-f", + "--log-file", + type=str, + default="/var/log/matrix-synapse/homeserver.log", + help="name of the log file", + ) + + args = parser.parse_args() + out = args.output_file + out.write(DEFAULT_LOG_CONFIG.substitute(log_file=args.log_file)) + out.flush() + + +if __name__ == "__main__": + main() diff --git a/synapse/_scripts/generate_signing_key.py b/synapse/_scripts/generate_signing_key.py new file mode 100755 index 0000000000..bc26d25bfd --- /dev/null +++ b/synapse/_scripts/generate_signing_key.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
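A brief usage sketch for the config-generation scripts above: because each one exposes a plain main() that reads sys.argv via argparse, they can be driven in-process as well as through the new entry points. The output and log file paths below are placeholders chosen for illustration.

import sys

from synapse._scripts.generate_log_config import main as generate_log_config

# Build an argv matching the options defined in generate_log_config.py.
sys.argv = [
    "generate_log_config",
    "-o", "my_log_config.yaml",                      # placeholder output path
    "-f", "/var/log/matrix-synapse/homeserver.log",  # log file to reference
]
generate_log_config()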
+import argparse +import sys + +from signedjson.key import generate_signing_key, write_signing_keys + +from synapse.util.stringutils import random_string + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "-o", + "--output_file", + type=argparse.FileType("w"), + default=sys.stdout, + help="Where to write the output to", + ) + args = parser.parse_args() + + key_id = "a_" + random_string(4) + key = (generate_signing_key(key_id),) + write_signing_keys(args.output_file, key) + + +if __name__ == "__main__": + main() diff --git a/synapse/_scripts/hash_password.py b/synapse/_scripts/hash_password.py new file mode 100755 index 0000000000..708640c7de --- /dev/null +++ b/synapse/_scripts/hash_password.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python + +import argparse +import getpass +import sys +import unicodedata + +import bcrypt +import yaml + + +def prompt_for_pass(): + password = getpass.getpass("Password: ") + + if not password: + raise Exception("Password cannot be blank.") + + confirm_password = getpass.getpass("Confirm password: ") + + if password != confirm_password: + raise Exception("Passwords do not match.") + + return password + + +def main(): + bcrypt_rounds = 12 + password_pepper = "" + + parser = argparse.ArgumentParser( + description=( + "Calculate the hash of a new password, so that passwords can be reset" + ) + ) + parser.add_argument( + "-p", + "--password", + default=None, + help="New password for user. Will prompt if omitted.", + ) + parser.add_argument( + "-c", + "--config", + type=argparse.FileType("r"), + help=( + "Path to server config file. " + "Used to read in bcrypt_rounds and password_pepper." + ), + ) + + args = parser.parse_args() + if "config" in args and args.config: + config = yaml.safe_load(args.config) + bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds) + password_config = config.get("password_config", None) or {} + password_pepper = password_config.get("pepper", password_pepper) + password = args.password + + if not password: + password = prompt_for_pass() + + # On Python 2, make sure we decode it to Unicode before we normalise it + if isinstance(password, bytes): + try: + password = password.decode(sys.stdin.encoding) + except UnicodeDecodeError: + print( + "ERROR! Your password is not decodable using your terminal encoding (%s)." + % (sys.stdin.encoding,) + ) + + pw = unicodedata.normalize("NFKC", password) + + hashed = bcrypt.hashpw( + pw.encode("utf8") + password_pepper.encode("utf8"), + bcrypt.gensalt(bcrypt_rounds), + ).decode("ascii") + + print(hashed) + + +if __name__ == "__main__": + main() diff --git a/synapse/_scripts/move_remote_media_to_new_store.py b/synapse/_scripts/move_remote_media_to_new_store.py new file mode 100755 index 0000000000..9667d95dfe --- /dev/null +++ b/synapse/_scripts/move_remote_media_to_new_store.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Moves a list of remote media from one media store to another. 
+ +The input should be a list of media files to be moved, one per line. Each line +should be formatted:: + + | + +This can be extracted from postgres with:: + + psql --tuples-only -A -c "select media_origin, filesystem_id from + matrix.remote_media_cache where ..." + +To use, pipe the above into:: + + PYTHON_PATH=. synapse/_scripts/move_remote_media_to_new_store.py +""" + +import argparse +import logging +import os +import shutil +import sys + +from synapse.rest.media.v1.filepath import MediaFilePaths + +logger = logging.getLogger() + + +def main(src_repo, dest_repo): + src_paths = MediaFilePaths(src_repo) + dest_paths = MediaFilePaths(dest_repo) + for line in sys.stdin: + line = line.strip() + parts = line.split("|") + if len(parts) != 2: + print("Unable to parse input line %s" % line, file=sys.stderr) + sys.exit(1) + + move_media(parts[0], parts[1], src_paths, dest_paths) + + +def move_media(origin_server, file_id, src_paths, dest_paths): + """Move the given file, and any thumbnails, to the dest repo + + Args: + origin_server (str): + file_id (str): + src_paths (MediaFilePaths): + dest_paths (MediaFilePaths): + """ + logger.info("%s/%s", origin_server, file_id) + + # check that the original exists + original_file = src_paths.remote_media_filepath(origin_server, file_id) + if not os.path.exists(original_file): + logger.warning( + "Original for %s/%s (%s) does not exist", + origin_server, + file_id, + original_file, + ) + else: + mkdir_and_move( + original_file, dest_paths.remote_media_filepath(origin_server, file_id) + ) + + # now look for thumbnails + original_thumb_dir = src_paths.remote_media_thumbnail_dir(origin_server, file_id) + if not os.path.exists(original_thumb_dir): + return + + mkdir_and_move( + original_thumb_dir, + dest_paths.remote_media_thumbnail_dir(origin_server, file_id), + ) + + +def mkdir_and_move(original_file, dest_file): + dirname = os.path.dirname(dest_file) + if not os.path.exists(dirname): + logger.debug("mkdir %s", dirname) + os.makedirs(dirname) + logger.debug("mv %s %s", original_file, dest_file) + shutil.move(original_file, dest_file) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("-v", action="store_true", help="enable debug logging") + parser.add_argument("src_repo", help="Path to source content repo") + parser.add_argument("dest_repo", help="Path to source content repo") + args = parser.parse_args() + + logging_config = { + "level": logging.DEBUG if args.v else logging.INFO, + "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", + } + logging.basicConfig(**logging_config) + + main(args.src_repo, args.dest_repo) diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py new file mode 100755 index 0000000000..c38666da18 --- /dev/null +++ b/synapse/_scripts/synapse_port_db.py @@ -0,0 +1,1257 @@ +#!/usr/bin/env python +# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import curses +import logging +import sys +import time +import traceback +from typing import Dict, Iterable, Optional, Set + +import yaml +from matrix_common.versionstring import get_distribution_version_string + +from twisted.internet import defer, reactor + +from synapse.config.database import DatabaseConnectionConfig +from synapse.config.homeserver import HomeServerConfig +from synapse.logging.context import ( + LoggingContext, + make_deferred_yieldable, + run_in_background, +) +from synapse.storage.database import DatabasePool, make_conn +from synapse.storage.databases.main import PushRuleStore +from synapse.storage.databases.main.account_data import AccountDataWorkerStore +from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore +from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore +from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore +from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore +from synapse.storage.databases.main.events_bg_updates import ( + EventsBackgroundUpdatesStore, +) +from synapse.storage.databases.main.group_server import GroupServerWorkerStore +from synapse.storage.databases.main.media_repository import ( + MediaRepositoryBackgroundUpdateStore, +) +from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore +from synapse.storage.databases.main.pusher import PusherWorkerStore +from synapse.storage.databases.main.registration import ( + RegistrationBackgroundUpdateStore, + find_max_generated_user_id_localpart, +) +from synapse.storage.databases.main.room import RoomBackgroundUpdateStore +from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore +from synapse.storage.databases.main.search import SearchBackgroundUpdateStore +from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore +from synapse.storage.databases.main.stats import StatsStore +from synapse.storage.databases.main.user_directory import ( + UserDirectoryBackgroundUpdateStore, +) +from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore +from synapse.storage.engines import create_engine +from synapse.storage.prepare_database import prepare_database +from synapse.util import Clock + +logger = logging.getLogger("synapse_port_db") + + +BOOLEAN_COLUMNS = { + "events": ["processed", "outlier", "contains_url"], + "rooms": ["is_public", "has_auth_chain_index"], + "event_edges": ["is_state"], + "presence_list": ["accepted"], + "presence_stream": ["currently_active"], + "public_room_list_stream": ["visibility"], + "devices": ["hidden"], + "device_lists_outbound_pokes": ["sent"], + "users_who_share_rooms": ["share_private"], + "groups": ["is_public"], + "group_rooms": ["is_public"], + "group_users": ["is_public", "is_admin"], + "group_summary_rooms": ["is_public"], + "group_room_categories": ["is_public"], + "group_summary_users": ["is_public"], + "group_roles": ["is_public"], + "local_group_membership": ["is_publicised", "is_admin"], + "e2e_room_keys": ["is_verified"], + "account_validity": ["email_sent"], + "redactions": ["have_censored"], + "room_stats_state": ["is_federatable"], + "local_media_repository": ["safe_from_quarantine"], + "users": ["shadow_banned"], + "e2e_fallback_keys_json": ["used"], + "access_tokens": ["used"], +} + + +APPEND_ONLY_TABLES = [ + 
"event_reference_hashes", + "events", + "event_json", + "state_events", + "room_memberships", + "topics", + "room_names", + "rooms", + "local_media_repository", + "local_media_repository_thumbnails", + "remote_media_cache", + "remote_media_cache_thumbnails", + "redactions", + "event_edges", + "event_auth", + "received_transactions", + "sent_transactions", + "transaction_id_to_pdu", + "users", + "state_groups", + "state_groups_state", + "event_to_state_groups", + "rejections", + "event_search", + "presence_stream", + "push_rules_stream", + "ex_outlier_stream", + "cache_invalidation_stream_by_instance", + "public_room_list_stream", + "state_group_edges", + "stream_ordering_to_exterm", +] + + +IGNORED_TABLES = { + # We don't port these tables, as they're a faff and we can regenerate + # them anyway. + "user_directory", + "user_directory_search", + "user_directory_search_content", + "user_directory_search_docsize", + "user_directory_search_segdir", + "user_directory_search_segments", + "user_directory_search_stat", + "user_directory_search_pos", + "users_who_share_private_rooms", + "users_in_public_room", + # UI auth sessions have foreign keys so additional care needs to be taken, + # the sessions are transient anyway, so ignore them. + "ui_auth_sessions", + "ui_auth_sessions_credentials", + "ui_auth_sessions_ips", +} + + +# Error returned by the run function. Used at the top-level part of the script to +# handle errors and return codes. +end_error = None # type: Optional[str] +# The exec_info for the error, if any. If error is defined but not exec_info the script +# will show only the error message without the stacktrace, if exec_info is defined but +# not the error then the script will show nothing outside of what's printed in the run +# function. If both are defined, the script will print both the error and the stacktrace. +end_error_exec_info = None + + +class Store( + ClientIpBackgroundUpdateStore, + DeviceInboxBackgroundUpdateStore, + DeviceBackgroundUpdateStore, + EventsBackgroundUpdatesStore, + MediaRepositoryBackgroundUpdateStore, + RegistrationBackgroundUpdateStore, + RoomBackgroundUpdateStore, + RoomMemberBackgroundUpdateStore, + SearchBackgroundUpdateStore, + StateBackgroundUpdateStore, + MainStateBackgroundUpdateStore, + UserDirectoryBackgroundUpdateStore, + EndToEndKeyBackgroundStore, + StatsStore, + AccountDataWorkerStore, + PushRuleStore, + PusherWorkerStore, + PresenceBackgroundUpdateStore, + GroupServerWorkerStore, +): + def execute(self, f, *args, **kwargs): + return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) + + def execute_sql(self, sql, *args): + def r(txn): + txn.execute(sql, args) + return txn.fetchall() + + return self.db_pool.runInteraction("execute_sql", r) + + def insert_many_txn(self, txn, table, headers, rows): + sql = "INSERT INTO %s (%s) VALUES (%s)" % ( + table, + ", ".join(k for k in headers), + ", ".join("%s" for _ in headers), + ) + + try: + txn.executemany(sql, rows) + except Exception: + logger.exception("Failed to insert: %s", table) + raise + + def set_room_is_public(self, room_id, is_public): + raise Exception( + "Attempt to set room_is_public during port_db: database not empty?" 
+ ) + + +class MockHomeserver: + def __init__(self, config): + self.clock = Clock(reactor) + self.config = config + self.hostname = config.server.server_name + self.version_string = "Synapse/" + get_distribution_version_string( + "matrix-synapse" + ) + + def get_clock(self): + return self.clock + + def get_reactor(self): + return reactor + + def get_instance_name(self): + return "master" + + +class Porter(object): + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + async def setup_table(self, table): + if table in APPEND_ONLY_TABLES: + # It's safe to just carry on inserting. + row = await self.postgres_store.db_pool.simple_select_one( + table="port_from_sqlite3", + keyvalues={"table_name": table}, + retcols=("forward_rowid", "backward_rowid"), + allow_none=True, + ) + + total_to_port = None + if row is None: + if table == "sent_transactions": + ( + forward_chunk, + already_ported, + total_to_port, + ) = await self._setup_sent_transactions() + backward_chunk = 0 + else: + await self.postgres_store.db_pool.simple_insert( + table="port_from_sqlite3", + values={ + "table_name": table, + "forward_rowid": 1, + "backward_rowid": 0, + }, + ) + + forward_chunk = 1 + backward_chunk = 0 + already_ported = 0 + else: + forward_chunk = row["forward_rowid"] + backward_chunk = row["backward_rowid"] + + if total_to_port is None: + already_ported, total_to_port = await self._get_total_count_to_port( + table, forward_chunk, backward_chunk + ) + else: + + def delete_all(txn): + txn.execute( + "DELETE FROM port_from_sqlite3 WHERE table_name = %s", (table,) + ) + txn.execute("TRUNCATE %s CASCADE" % (table,)) + + await self.postgres_store.execute(delete_all) + + await self.postgres_store.db_pool.simple_insert( + table="port_from_sqlite3", + values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0}, + ) + + forward_chunk = 1 + backward_chunk = 0 + + already_ported, total_to_port = await self._get_total_count_to_port( + table, forward_chunk, backward_chunk + ) + + return table, already_ported, total_to_port, forward_chunk, backward_chunk + + async def get_table_constraints(self) -> Dict[str, Set[str]]: + """Returns a map of tables that have foreign key constraints to tables they depend on.""" + + def _get_constraints(txn): + # We can pull the information about foreign key constraints out from + # the postgres schema tables. 
+ sql = """ + SELECT DISTINCT + tc.table_name, + ccu.table_name AS foreign_table_name + FROM + information_schema.table_constraints AS tc + INNER JOIN information_schema.constraint_column_usage AS ccu + USING (table_schema, constraint_name) + WHERE tc.constraint_type = 'FOREIGN KEY' + AND tc.table_name != ccu.table_name; + """ + txn.execute(sql) + + results = {} + for table, foreign_table in txn: + results.setdefault(table, set()).add(foreign_table) + return results + + return await self.postgres_store.db_pool.runInteraction( + "get_table_constraints", _get_constraints + ) + + async def handle_table( + self, table, postgres_size, table_size, forward_chunk, backward_chunk + ): + logger.info( + "Table %s: %i/%i (rows %i-%i) already ported", + table, + postgres_size, + table_size, + backward_chunk + 1, + forward_chunk - 1, + ) + + if not table_size: + return + + self.progress.add_table(table, postgres_size, table_size) + + if table == "event_search": + await self.handle_search_table( + postgres_size, table_size, forward_chunk, backward_chunk + ) + return + + if table in IGNORED_TABLES: + self.progress.update(table, table_size) # Mark table as done + return + + if table == "user_directory_stream_pos": + # We need to make sure there is a single row, `(X, null), as that is + # what synapse expects to be there. + await self.postgres_store.db_pool.simple_insert( + table=table, values={"stream_id": None} + ) + self.progress.update(table, table_size) # Mark table as done + return + + forward_select = ( + "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,) + ) + + backward_select = ( + "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" % (table,) + ) + + do_forward = [True] + do_backward = [True] + + while True: + + def r(txn): + forward_rows = [] + backward_rows = [] + if do_forward[0]: + txn.execute(forward_select, (forward_chunk, self.batch_size)) + forward_rows = txn.fetchall() + if not forward_rows: + do_forward[0] = False + + if do_backward[0]: + txn.execute(backward_select, (backward_chunk, self.batch_size)) + backward_rows = txn.fetchall() + if not backward_rows: + do_backward[0] = False + + if forward_rows or backward_rows: + headers = [column[0] for column in txn.description] + else: + headers = None + + return headers, forward_rows, backward_rows + + headers, frows, brows = await self.sqlite_store.db_pool.runInteraction( + "select", r + ) + + if frows or brows: + if frows: + forward_chunk = max(row[0] for row in frows) + 1 + if brows: + backward_chunk = min(row[0] for row in brows) - 1 + + rows = frows + brows + rows = self._convert_rows(table, headers, rows) + + def insert(txn): + self.postgres_store.insert_many_txn(txn, table, headers[1:], rows) + + self.postgres_store.db_pool.simple_update_one_txn( + txn, + table="port_from_sqlite3", + keyvalues={"table_name": table}, + updatevalues={ + "forward_rowid": forward_chunk, + "backward_rowid": backward_chunk, + }, + ) + + await self.postgres_store.execute(insert) + + postgres_size += len(rows) + + self.progress.update(table, postgres_size) + else: + return + + async def handle_search_table( + self, postgres_size, table_size, forward_chunk, backward_chunk + ): + select = ( + "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering" + " FROM event_search as es" + " INNER JOIN events AS e USING (event_id, room_id)" + " WHERE es.rowid >= ?" + " ORDER BY es.rowid LIMIT ?" 
+ ) + + while True: + + def r(txn): + txn.execute(select, (forward_chunk, self.batch_size)) + rows = txn.fetchall() + headers = [column[0] for column in txn.description] + + return headers, rows + + headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r) + + if rows: + forward_chunk = rows[-1][0] + 1 + + # We have to treat event_search differently since it has a + # different structure in the two different databases. + def insert(txn): + sql = ( + "INSERT INTO event_search (event_id, room_id, key," + " sender, vector, origin_server_ts, stream_ordering)" + " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)" + ) + + rows_dict = [] + for row in rows: + d = dict(zip(headers, row)) + if "\0" in d["value"]: + logger.warning("dropping search row %s", d) + else: + rows_dict.append(d) + + txn.executemany( + sql, + [ + ( + row["event_id"], + row["room_id"], + row["key"], + row["sender"], + row["value"], + row["origin_server_ts"], + row["stream_ordering"], + ) + for row in rows_dict + ], + ) + + self.postgres_store.db_pool.simple_update_one_txn( + txn, + table="port_from_sqlite3", + keyvalues={"table_name": "event_search"}, + updatevalues={ + "forward_rowid": forward_chunk, + "backward_rowid": backward_chunk, + }, + ) + + await self.postgres_store.execute(insert) + + postgres_size += len(rows) + + self.progress.update("event_search", postgres_size) + + else: + return + + def build_db_store( + self, + db_config: DatabaseConnectionConfig, + allow_outdated_version: bool = False, + ): + """Builds and returns a database store using the provided configuration. + + Args: + db_config: The database configuration + allow_outdated_version: True to suppress errors about the database server + version being too old to run a complete synapse + + Returns: + The built Store object. + """ + self.progress.set_state("Preparing %s" % db_config.config["name"]) + + engine = create_engine(db_config.config) + + hs = MockHomeserver(self.hs_config) + + with make_conn(db_config, engine, "portdb") as db_conn: + engine.check_database( + db_conn, allow_outdated_version=allow_outdated_version + ) + prepare_database(db_conn, engine, config=self.hs_config) + store = Store(DatabasePool(hs, db_config, engine), db_conn, hs) + db_conn.commit() + + return store + + async def run_background_updates_on_postgres(self): + # Manually apply all background updates on the PostgreSQL database. + postgres_ready = ( + await self.postgres_store.db_pool.updates.has_completed_background_updates() + ) + + if not postgres_ready: + # Only say that we're running background updates when there are background + # updates to run. + self.progress.set_state("Running background updates on PostgreSQL") + + while not postgres_ready: + await self.postgres_store.db_pool.updates.do_next_background_update(100) + postgres_ready = await ( + self.postgres_store.db_pool.updates.has_completed_background_updates() + ) + + async def run(self): + """Ports the SQLite database to a PostgreSQL database. + + When a fatal error is met, its message is assigned to the global "end_error" + variable. When this error comes with a stacktrace, its exec_info is assigned to + the global "end_error_exec_info" variable. + """ + global end_error + + try: + # we allow people to port away from outdated versions of sqlite. + self.sqlite_store = self.build_db_store( + DatabaseConnectionConfig("master-sqlite", self.sqlite_config), + allow_outdated_version=True, + ) + + # Check if all background updates are done, abort if not. 
+ updates_complete = ( + await self.sqlite_store.db_pool.updates.has_completed_background_updates() + ) + if not updates_complete: + end_error = ( + "Pending background updates exist in the SQLite3 database." + " Please start Synapse again and wait until every update has finished" + " before running this script.\n" + ) + return + + self.postgres_store = self.build_db_store( + self.hs_config.database.get_single_database() + ) + + await self.run_background_updates_on_postgres() + + self.progress.set_state("Creating port tables") + + def create_port_table(txn): + txn.execute( + "CREATE TABLE IF NOT EXISTS port_from_sqlite3 (" + " table_name varchar(100) NOT NULL UNIQUE," + " forward_rowid bigint NOT NULL," + " backward_rowid bigint NOT NULL" + ")" + ) + + # The old port script created a table with just a "rowid" column. + # We want people to be able to rerun this script from an old port + # so that they can pick up any missing events that were not + # ported across. + def alter_table(txn): + txn.execute( + "ALTER TABLE IF EXISTS port_from_sqlite3" + " RENAME rowid TO forward_rowid" + ) + txn.execute( + "ALTER TABLE IF EXISTS port_from_sqlite3" + " ADD backward_rowid bigint NOT NULL DEFAULT 0" + ) + + try: + await self.postgres_store.db_pool.runInteraction( + "alter_table", alter_table + ) + except Exception: + # On Error Resume Next + pass + + await self.postgres_store.db_pool.runInteraction( + "create_port_table", create_port_table + ) + + # Step 2. Set up sequences + # + # We do this before porting the tables so that event if we fail half + # way through the postgres DB always have sequences that are greater + # than their respective tables. If we don't then creating the + # `DataStore` object will fail due to the inconsistency. + self.progress.set_state("Setting up sequence generators") + await self._setup_state_group_id_seq() + await self._setup_user_id_seq() + await self._setup_events_stream_seqs() + await self._setup_sequence( + "device_inbox_sequence", ("device_inbox", "device_federation_outbox") + ) + await self._setup_sequence( + "account_data_sequence", + ("room_account_data", "room_tags_revisions", "account_data"), + ) + await self._setup_sequence("receipts_sequence", ("receipts_linearized",)) + await self._setup_sequence("presence_stream_sequence", ("presence_stream",)) + await self._setup_auth_chain_sequence() + + # Step 3. Get tables. + self.progress.set_state("Fetching tables") + sqlite_tables = await self.sqlite_store.db_pool.simple_select_onecol( + table="sqlite_master", keyvalues={"type": "table"}, retcol="name" + ) + + postgres_tables = await self.postgres_store.db_pool.simple_select_onecol( + table="information_schema.tables", + keyvalues={}, + retcol="distinct table_name", + ) + + tables = set(sqlite_tables) & set(postgres_tables) + logger.info("Found %d tables", len(tables)) + + # Step 4. Figure out what still needs copying + self.progress.set_state("Checking on port progress") + setup_res = await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background(self.setup_table, table) + for table in tables + if table not in ["schema_version", "applied_schema_deltas"] + and not table.startswith("sqlite_") + ], + consumeErrors=True, + ) + ) + # Map from table name to args passed to `handle_table`, i.e. a tuple + # of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`. + tables_to_port_info_map = {r[0]: r[1:] for r in setup_res} + + # Step 5. Do the copying. 
+ # + # This is slightly convoluted as we need to ensure tables are ported + # in the correct order due to foreign key constraints. + self.progress.set_state("Copying to postgres") + + constraints = await self.get_table_constraints() + tables_ported = set() # type: Set[str] + + while tables_to_port_info_map: + # Pulls out all tables that are still to be ported and which + # only depend on tables that are already ported (if any). + tables_to_port = [ + table + for table in tables_to_port_info_map + if not constraints.get(table, set()) - tables_ported + ] + + await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self.handle_table, + table, + *tables_to_port_info_map.pop(table), + ) + for table in tables_to_port + ], + consumeErrors=True, + ) + ) + + tables_ported.update(tables_to_port) + + self.progress.done() + except Exception as e: + global end_error_exec_info + end_error = str(e) + end_error_exec_info = sys.exc_info() + logger.exception("") + finally: + reactor.stop() + + def _convert_rows(self, table, headers, rows): + bool_col_names = BOOLEAN_COLUMNS.get(table, []) + + bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names] + + class BadValueException(Exception): + pass + + def conv(j, col): + if j in bool_cols: + return bool(col) + if isinstance(col, bytes): + return bytearray(col) + elif isinstance(col, str) and "\0" in col: + logger.warning( + "DROPPING ROW: NUL value in table %s col %s: %r", + table, + headers[j], + col, + ) + raise BadValueException() + return col + + outrows = [] + for row in rows: + try: + outrows.append( + tuple(conv(j, col) for j, col in enumerate(row) if j > 0) + ) + except BadValueException: + pass + + return outrows + + async def _setup_sent_transactions(self): + # Only save things from the last day + yesterday = int(time.time() * 1000) - 86400000 + + # And save the max transaction id from each destination + select = ( + "SELECT rowid, * FROM sent_transactions WHERE rowid IN (" + "SELECT max(rowid) FROM sent_transactions" + " GROUP BY destination" + ")" + ) + + def r(txn): + txn.execute(select) + rows = txn.fetchall() + headers = [column[0] for column in txn.description] + + ts_ind = headers.index("ts") + + return headers, [r for r in rows if r[ts_ind] < yesterday] + + headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r) + + rows = self._convert_rows("sent_transactions", headers, rows) + + inserted_rows = len(rows) + if inserted_rows: + max_inserted_rowid = max(r[0] for r in rows) + + def insert(txn): + self.postgres_store.insert_many_txn( + txn, "sent_transactions", headers[1:], rows + ) + + await self.postgres_store.execute(insert) + else: + max_inserted_rowid = 0 + + def get_start_id(txn): + txn.execute( + "SELECT rowid FROM sent_transactions WHERE ts >= ?" 
+ " ORDER BY rowid ASC LIMIT 1", + (yesterday,), + ) + + rows = txn.fetchall() + if rows: + return rows[0][0] + else: + return 1 + + next_chunk = await self.sqlite_store.execute(get_start_id) + next_chunk = max(max_inserted_rowid + 1, next_chunk) + + await self.postgres_store.db_pool.simple_insert( + table="port_from_sqlite3", + values={ + "table_name": "sent_transactions", + "forward_rowid": next_chunk, + "backward_rowid": 0, + }, + ) + + def get_sent_table_size(txn): + txn.execute( + "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,) + ) + (size,) = txn.fetchone() + return int(size) + + remaining_count = await self.sqlite_store.execute(get_sent_table_size) + + total_count = remaining_count + inserted_rows + + return next_chunk, inserted_rows, total_count + + async def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk): + frows = await self.sqlite_store.execute_sql( + "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk + ) + + brows = await self.sqlite_store.execute_sql( + "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk + ) + + return frows[0][0] + brows[0][0] + + async def _get_already_ported_count(self, table): + rows = await self.postgres_store.execute_sql( + "SELECT count(*) FROM %s" % (table,) + ) + + return rows[0][0] + + async def _get_total_count_to_port(self, table, forward_chunk, backward_chunk): + remaining, done = await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self._get_remaining_count_to_port, + table, + forward_chunk, + backward_chunk, + ), + run_in_background(self._get_already_ported_count, table), + ], + ) + ) + + remaining = int(remaining) if remaining else 0 + done = int(done) if done else 0 + + return done, remaining + done + + async def _setup_state_group_id_seq(self) -> None: + curr_id = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True + ) + + if not curr_id: + return + + def r(txn): + next_id = curr_id + 1 + txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,)) + + await self.postgres_store.db_pool.runInteraction("setup_state_group_id_seq", r) + + async def _setup_user_id_seq(self) -> None: + curr_id = await self.sqlite_store.db_pool.runInteraction( + "setup_user_id_seq", find_max_generated_user_id_localpart + ) + + def r(txn): + next_id = curr_id + 1 + txn.execute("ALTER SEQUENCE user_id_seq RESTART WITH %s", (next_id,)) + + await self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r) + + async def _setup_events_stream_seqs(self) -> None: + """Set the event stream sequences to the correct values.""" + + # We get called before we've ported the events table, so we need to + # fetch the current positions from the SQLite store. 
+ curr_forward_id = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="events", keyvalues={}, retcol="MAX(stream_ordering)", allow_none=True + ) + + curr_backward_id = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="events", + keyvalues={}, + retcol="MAX(-MIN(stream_ordering), 1)", + allow_none=True, + ) + + def _setup_events_stream_seqs_set_pos(txn): + if curr_forward_id: + txn.execute( + "ALTER SEQUENCE events_stream_seq RESTART WITH %s", + (curr_forward_id + 1,), + ) + + if curr_backward_id: + txn.execute( + "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", + (curr_backward_id + 1,), + ) + + await self.postgres_store.db_pool.runInteraction( + "_setup_events_stream_seqs", + _setup_events_stream_seqs_set_pos, + ) + + async def _setup_sequence( + self, sequence_name: str, stream_id_tables: Iterable[str] + ) -> None: + """Set a sequence to the correct value.""" + current_stream_ids = [] + for stream_id_table in stream_id_tables: + max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol( + table=stream_id_table, + keyvalues={}, + retcol="COALESCE(MAX(stream_id), 1)", + allow_none=True, + ) + current_stream_ids.append(max_stream_id) + + next_id = max(current_stream_ids) + 1 + + def r(txn): + sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name,) + txn.execute(sql + " %s", (next_id,)) + + await self.postgres_store.db_pool.runInteraction( + "_setup_%s" % (sequence_name,), r + ) + + async def _setup_auth_chain_sequence(self) -> None: + curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="event_auth_chains", + keyvalues={}, + retcol="MAX(chain_id)", + allow_none=True, + ) + + def r(txn): + txn.execute( + "ALTER SEQUENCE event_auth_chain_id RESTART WITH %s", + (curr_chain_id + 1,), + ) + + if curr_chain_id is not None: + await self.postgres_store.db_pool.runInteraction( + "_setup_event_auth_chain_id", + r, + ) + + +############################################## +# The following is simply UI stuff +############################################## + + +class Progress(object): + """Used to report progress of the port""" + + def __init__(self): + self.tables = {} + + self.start_time = int(time.time()) + + def add_table(self, table, cur, size): + self.tables[table] = { + "start": cur, + "num_done": cur, + "total": size, + "perc": int(cur * 100 / size), + } + + def update(self, table, num_done): + data = self.tables[table] + data["num_done"] = num_done + data["perc"] = int(num_done * 100 / data["total"]) + + def done(self): + pass + + +class CursesProgress(Progress): + """Reports progress to a curses window""" + + def __init__(self, stdscr): + self.stdscr = stdscr + + curses.use_default_colors() + curses.curs_set(0) + + curses.init_pair(1, curses.COLOR_RED, -1) + curses.init_pair(2, curses.COLOR_GREEN, -1) + + self.last_update = 0 + + self.finished = False + + self.total_processed = 0 + self.total_remaining = 0 + + super(CursesProgress, self).__init__() + + def update(self, table, num_done): + super(CursesProgress, self).update(table, num_done) + + self.total_processed = 0 + self.total_remaining = 0 + for data in self.tables.values(): + self.total_processed += data["num_done"] - data["start"] + self.total_remaining += data["total"] - data["num_done"] + + self.render() + + def render(self, force=False): + now = time.time() + + if not force and now - self.last_update < 0.2: + # reactor.callLater(1, self.render) + return + + self.stdscr.clear() + + rows, cols = self.stdscr.getmaxyx() + + duration = int(now) - 
int(self.start_time) + + minutes, seconds = divmod(duration, 60) + duration_str = "%02dm %02ds" % (minutes, seconds) + + if self.finished: + status = "Time spent: %s (Done!)" % (duration_str,) + else: + + if self.total_processed > 0: + left = float(self.total_remaining) / self.total_processed + + est_remaining = (int(now) - self.start_time) * left + est_remaining_str = "%02dm %02ds remaining" % divmod(est_remaining, 60) + else: + est_remaining_str = "Unknown" + status = "Time spent: %s (est. remaining: %s)" % ( + duration_str, + est_remaining_str, + ) + + self.stdscr.addstr(0, 0, status, curses.A_BOLD) + + max_len = max(len(t) for t in self.tables.keys()) + + left_margin = 5 + middle_space = 1 + + items = self.tables.items() + items = sorted(items, key=lambda i: (i[1]["perc"], i[0])) + + for i, (table, data) in enumerate(items): + if i + 2 >= rows: + break + + perc = data["perc"] + + color = curses.color_pair(2) if perc == 100 else curses.color_pair(1) + + self.stdscr.addstr( + i + 2, left_margin + max_len - len(table), table, curses.A_BOLD | color + ) + + size = 20 + + progress = "[%s%s]" % ( + "#" * int(perc * size / 100), + " " * (size - int(perc * size / 100)), + ) + + self.stdscr.addstr( + i + 2, + left_margin + max_len + middle_space, + "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]), + ) + + if self.finished: + self.stdscr.addstr(rows - 1, 0, "Press any key to exit...") + + self.stdscr.refresh() + self.last_update = time.time() + + def done(self): + self.finished = True + self.render(True) + self.stdscr.getch() + + def set_state(self, state): + self.stdscr.clear() + self.stdscr.addstr(0, 0, state + "...", curses.A_BOLD) + self.stdscr.refresh() + + +class TerminalProgress(Progress): + """Just prints progress to the terminal""" + + def update(self, table, num_done): + super(TerminalProgress, self).update(table, num_done) + + data = self.tables[table] + + print( + "%s: %d%% (%d/%d)" % (table, data["perc"], data["num_done"], data["total"]) + ) + + def set_state(self, state): + print(state + "...") + + +############################################## +############################################## + + +def main(): + parser = argparse.ArgumentParser( + description="A script to port an existing synapse SQLite database to" + " a new PostgreSQL database." + ) + parser.add_argument("-v", action="store_true") + parser.add_argument( + "--sqlite-database", + required=True, + help="The snapshot of the SQLite database file. 
This must not be" + " currently used by a running synapse server", + ) + parser.add_argument( + "--postgres-config", + type=argparse.FileType("r"), + required=True, + help="The database config file for the PostgreSQL database", + ) + parser.add_argument( + "--curses", action="store_true", help="display a curses based progress UI" + ) + + parser.add_argument( + "--batch-size", + type=int, + default=1000, + help="The number of rows to select from the SQLite table each" + " iteration [default=1000]", + ) + + args = parser.parse_args() + + logging_config = { + "level": logging.DEBUG if args.v else logging.INFO, + "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", + } + + if args.curses: + logging_config["filename"] = "port-synapse.log" + + logging.basicConfig(**logging_config) + + sqlite_config = { + "name": "sqlite3", + "args": { + "database": args.sqlite_database, + "cp_min": 1, + "cp_max": 1, + "check_same_thread": False, + }, + } + + hs_config = yaml.safe_load(args.postgres_config) + + if "database" not in hs_config: + sys.stderr.write("The configuration file must have a 'database' section.\n") + sys.exit(4) + + postgres_config = hs_config["database"] + + if "name" not in postgres_config: + sys.stderr.write("Malformed database config: no 'name'\n") + sys.exit(2) + if postgres_config["name"] != "psycopg2": + sys.stderr.write("Database must use the 'psycopg2' connector.\n") + sys.exit(3) + + config = HomeServerConfig() + config.parse_config_dict(hs_config, "", "") + + def start(stdscr=None): + if stdscr: + progress = CursesProgress(stdscr) + else: + progress = TerminalProgress() + + porter = Porter( + sqlite_config=sqlite_config, + progress=progress, + batch_size=args.batch_size, + hs_config=config, + ) + + @defer.inlineCallbacks + def run(): + with LoggingContext("synapse_port_db_run"): + yield defer.ensureDeferred(porter.run()) + + reactor.callWhenRunning(run) + + reactor.run() + + if args.curses: + curses.wrapper(start) + else: + start() + + if end_error: + if end_error_exec_info: + exc_type, exc_value, exc_traceback = end_error_exec_info + traceback.print_exception(exc_type, exc_value, exc_traceback) + + sys.stderr.write(end_error) + + sys.exit(5) + + +if __name__ == "__main__": + main() diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py new file mode 100755 index 0000000000..f43676afaa --- /dev/null +++ b/synapse/_scripts/update_synapse_database.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import logging +import sys + +import yaml +from matrix_common.versionstring import get_distribution_version_string + +from twisted.internet import defer, reactor + +from synapse.config.homeserver import HomeServerConfig +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.server import HomeServer +from synapse.storage import DataStore + +logger = logging.getLogger("update_database") + + +class MockHomeserver(HomeServer): + DATASTORE_CLASS = DataStore + + def __init__(self, config, **kwargs): + super(MockHomeserver, self).__init__( + config.server.server_name, reactor=reactor, config=config, **kwargs + ) + + self.version_string = "Synapse/" + get_distribution_version_string( + "matrix-synapse" + ) + + +def run_background_updates(hs): + store = hs.get_datastores().main + + async def run_background_updates(): + await store.db_pool.updates.run_background_updates(sleep=False) + # Stop the reactor to exit the script once every background update is run. + reactor.stop() + + def run(): + # Apply all background updates on the database. + defer.ensureDeferred( + run_as_background_process("background_updates", run_background_updates) + ) + + reactor.callWhenRunning(run) + + reactor.run() + + +def main(): + parser = argparse.ArgumentParser( + description=( + "Updates a synapse database to the latest schema and optionally runs background updates" + " on it." + ) + ) + parser.add_argument("-v", action="store_true") + parser.add_argument( + "--database-config", + type=argparse.FileType("r"), + required=True, + help="Synapse configuration file, giving the details of the database to be updated", + ) + parser.add_argument( + "--run-background-updates", + action="store_true", + required=False, + help="run background updates after upgrading the database schema", + ) + + args = parser.parse_args() + + logging_config = { + "level": logging.DEBUG if args.v else logging.INFO, + "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", + } + + logging.basicConfig(**logging_config) + + # Load, process and sanity-check the config. + hs_config = yaml.safe_load(args.database_config) + + if "database" not in hs_config: + sys.stderr.write("The configuration file must have a 'database' section.\n") + sys.exit(4) + + config = HomeServerConfig() + config.parse_config_dict(hs_config, "", "") + + # Instantiate and initialise the homeserver object. + hs = MockHomeserver(config) + + # Setup instantiates the store within the homeserver object and updates the + # DB. + hs.setup() + + if args.run_background_updates: + run_background_updates(hs) + + +if __name__ == "__main__": + main() diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 1265738dc1..8e19e2fc26 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -383,7 +383,7 @@ class RootConfig: Build a default configuration file This is used when the user explicitly asks us to generate a config file - (eg with --generate_config). + (eg with --generate-config). Args: config_dir_path: The path where the config files are kept. 
Used to diff --git a/tox.ini b/tox.ini index 04b972e2c5..8d6aa7580b 100644 --- a/tox.ini +++ b/tox.ini @@ -38,15 +38,7 @@ lint_targets = setup.py synapse tests - scripts # annoyingly, black doesn't find these so we have to list them - scripts/export_signing_key - scripts/generate_config - scripts/generate_log_config - scripts/hash_password - scripts/register_new_matrix_user - scripts/synapse_port_db - scripts/update_synapse_database scripts-dev scripts-dev/build_debian_packages scripts-dev/sign_json -- cgit 1.4.1
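The main() functions added in synapse/_scripts/synapse_port_db.py and synapse/_scripts/update_synapse_database.py are the natural targets for console scripts. Below is a minimal setup.py sketch of how modules like these could be exposed as setuptools entry points; the distribution name, the entry-point names and the exact console_scripts mapping are illustrative assumptions rather than the project's actual packaging:

    # setup.py (illustrative sketch only; names are assumed for the example)
    from setuptools import find_packages, setup

    setup(
        name="example-synapse-packaging",  # hypothetical distribution name
        packages=find_packages(),
        entry_points={
            # Each console script maps a command name to "package.module:function".
            # Installing the package generates a small wrapper executable that
            # imports the module and calls the named function, i.e. the main()
            # functions defined in the new _scripts modules.
            "console_scripts": [
                "synapse_port_db = synapse._scripts.synapse_port_db:main",
                "update_synapse_database = synapse._scripts.update_synapse_database:main",
            ],
        },
    )

After an install of this shape (for example an editable install with pip install -e .), the commands are available on PATH, so standalone files under scripts/ are no longer needed; that is also why the tox.ini hunk above drops them from the lint targets.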