From 1a815fb04f1d17286be27379dd7463936606bd3a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 11:33:30 +0100
Subject: Don't hit DB for noop replication queries

---
 synapse/handlers/typing.py      | 3 +++
 synapse/storage/account_data.py | 3 +++
 synapse/storage/presence.py     | 3 +++
 synapse/storage/push_rule.py    | 3 +++
 synapse/storage/tags.py         | 3 +++
 5 files changed, 15 insertions(+)

diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 861b8f7989..5589296c09 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -221,6 +221,9 @@ class TypingHandler(object):
     def get_all_typing_updates(self, last_id, current_id):
         # TODO: Work out a way to do this without scanning the entire state.
+        if last_id == current_id:
+            return []
+
         rows = []
         for room_id, serial in self._room_serials.items():
             if last_id < serial and serial <= current_id:
diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py
index ec7e8d40d2..3fa226e92d 100644
--- a/synapse/storage/account_data.py
+++ b/synapse/storage/account_data.py
@@ -138,6 +138,9 @@ class AccountDataStore(SQLBaseStore):
             A deferred pair of lists of tuples of stream_id int, user_id string,
             room_id string, type string, and content string.
         """
+        if last_room_id == current_id and last_global_id == current_id:
+            return defer.succeed(([], []))
+
         def get_updated_account_data_txn(txn):
             sql = (
                 "SELECT stream_id, user_id, account_data_type, content"
diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py
index 3fab57a7e8..d03f7c541e 100644
--- a/synapse/storage/presence.py
+++ b/synapse/storage/presence.py
@@ -118,6 +118,9 @@ class PresenceStore(SQLBaseStore):
     )

     def get_all_presence_updates(self, last_id, current_id):
+        if last_id == current_id:
+            return defer.succeed([])
+
         def get_all_presence_updates_txn(txn):
             sql = (
                 "SELECT stream_id, user_id, state, last_active_ts,"
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index 786d6f6d67..8183b7f1b0 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -421,6 +421,9 @@ class PushRuleStore(SQLBaseStore):
     def get_all_push_rule_updates(self, last_id, current_id, limit):
         """Get all the push rule changes that have happened on the server"""
+        if last_id == current_id:
+            return defer.succeed([])
+
         def get_all_push_rule_updates_txn(txn):
             sql = (
                 "SELECT stream_id, event_stream_ordering, user_id, rule_id,"
diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py
index 9da23f34cb..5a2c1aa59b 100644
--- a/synapse/storage/tags.py
+++ b/synapse/storage/tags.py
@@ -68,6 +68,9 @@ class TagsStore(SQLBaseStore):
             A deferred list of tuples of stream_id int, user_id string,
             room_id string, tag string and content string.
         """
+        if last_id == current_id:
+            defer.returnValue([])
+
         def get_all_updated_tags_txn(txn):
             sql = (
                 "SELECT stream_id, user_id, room_id"
--
cgit 1.4.1
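The guard added to each of these five stores is the same idiom: when a replication client polls with a token equal to the current token, nothing can have changed, so the database is skipped entirely. A minimal sketch of the pattern in the Twisted inlineCallbacks style used above (the class and query method are illustrative, not Synapse's actual API):

    from twisted.internet import defer


    class ExampleStore(object):
        @defer.inlineCallbacks
        def get_updates_since(self, last_id, current_id):
            # The caller is already caught up: return an empty result
            # without scheduling any database work.
            if last_id == current_id:
                defer.returnValue([])

            rows = yield self._run_query(last_id, current_id)
            defer.returnValue(rows)

        def _run_query(self, last_id, current_id):
            # Stand-in for a real database transaction.
            return defer.succeed([])

Note the two variants in the patch: plain methods return `defer.succeed(...)` directly, while `@defer.inlineCallbacks` generators (such as `get_all_updated_tags`) use `defer.returnValue(...)`.
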
""" + if last_id == current_id: + defer.returnValue([]) + def get_all_updated_tags_txn(txn): sql = ( "SELECT stream_id, user_id, room_id" -- cgit 1.4.1 From 17aab5827a1a1eace4e44d130eef7da4dda6984f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Jun 2016 11:55:31 +0100 Subject: Add some logging for when servers ask for missing events --- synapse/federation/federation_server.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index f1d231b9d8..9f2a64dede 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -377,10 +377,20 @@ class FederationServer(FederationBase): @log_function def on_get_missing_events(self, origin, room_id, earliest_events, latest_events, limit, min_depth): + logger.info( + "on_get_missing_events: earliest_events: %r, latest_events: %r," + " limit: %d, min_depth: %d", + earliest_events, latest_events, limit, min_depth + ) missing_events = yield self.handler.on_get_missing_events( origin, room_id, earliest_events, latest_events, limit, min_depth ) + if len(missing_events) < 5: + logger.info("Returning %d events: %r", len(missing_events), missing_events) + else: + logger.info("Returning %d events", len(missing_events)) + time_now = self._clock.time_msec() defer.returnValue({ @@ -490,6 +500,11 @@ class FederationServer(FederationBase): latest = set(latest) latest |= seen + logger.info( + "Missing %d events for room %r: %r...", + len(prevs - seen), pdu.room_id, list(prevs - seen)[:5] + ) + missing_events = yield self.get_missing_events( origin, pdu.room_id, @@ -517,6 +532,10 @@ class FederationServer(FederationBase): prevs = {e_id for e_id, _ in pdu.prev_events} seen = set(have_seen.keys()) if prevs - seen: + logger.info( + "Still missing %d events for room %r: %r...", + len(prevs - seen), pdu.room_id, list(prevs - seen)[:5] + ) fetch_state = True if fetch_state: -- cgit 1.4.1 From 1fd6eb695d1fffbe830faf50c13607116300095b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Jun 2016 14:15:18 +0100 Subject: Enable auth on federation PublicRoomList --- synapse/federation/transport/server.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index a1a334955f..ab9f38f010 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -532,11 +532,6 @@ class PublicRoomList(BaseFederationServlet): data = yield self.room_list_handler.get_local_public_room_list() defer.returnValue((200, data)) - # Avoid doing remote HS authorization checks which are done by default by - # BaseFederationServlet. 
From efeabd31801224cbacd31b61ff0d869b70b1820d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 14:23:15 +0100
Subject: Log user that is making /publicRooms calls

---
 synapse/rest/client/v1/room.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index db52a1fc39..604c2a565e 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -279,6 +279,13 @@ class PublicRoomListRestServlet(ClientV1RestServlet):

     @defer.inlineCallbacks
     def on_GET(self, request):
+        try:
+            yield self.auth.get_user_by_req(request)
+        except AuthError:
+            # This endpoint isn't authed, but it's useful to know who's hitting
+            # it if they *do* supply an access token
+            pass
+
         handler = self.hs.get_room_list_handler()
         data = yield handler.get_aggregated_public_room_list()
--
cgit 1.4.1


From d88faf92d16d9384433452e4fb7901fd2bd6eda4 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 14:39:31 +0100
Subject: Fix up federation PublicRoomList

---
 synapse/federation/transport/server.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index ab9f38f010..6fc3e2207c 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -528,7 +528,7 @@ class PublicRoomList(BaseFederationServlet):
     PATH = "/publicRooms"

     @defer.inlineCallbacks
-    def on_GET(self, request):
+    def on_GET(self, origin, content, query):
         data = yield self.room_list_handler.get_local_public_room_list()
         defer.returnValue((200, data))
--
cgit 1.4.1


From 690029d1a3ebd26f56656a723fefdeafd71310e4 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 14:47:42 +0100
Subject: Don't make rooms visible by default

---
 synapse/rest/client/v1/room.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 604c2a565e..86fbe2747d 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -72,8 +72,6 @@ class RoomCreateRestServlet(ClientV1RestServlet):

     def get_room_config(self, request):
         user_supplied_config = parse_json_object_from_request(request)
-        # default visibility
-        user_supplied_config.setdefault("visibility", "public")
         return user_supplied_config

     def on_OPTIONS(self, request):
--
cgit 1.4.1


From defa28efa186013fab18b3da76f60273cb6c3bb1 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Wed, 8 Jun 2016 15:11:31 +0100
Subject: Disable the synchrotron on jenkins until the sytest support lands (#855)

* Disable the synchrotron on jenkins until the sytest support lands
* Poke jenkins
* Poke jenkins
* Poke jenkins
* Poke jenkins
* Poke jenkins
* Poke jenkins
* Poke jenkins
* Poke jenkins
---
 jenkins-dendron-postgres.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh
index 8e3a4c51a9..d15836e6bf 100755
--- a/jenkins-dendron-postgres.sh
+++ b/jenkins-dendron-postgres.sh
@@ -73,11 +73,12 @@ git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling b

 ./jenkins/prep_sytest_for_postgres.sh

+mkdir -p var
+
 echo >&2 "Running sytest with PostgreSQL";
 ./jenkins/install_and_run.sh --python $TOX_BIN/python \
     --synapse-directory $WORKSPACE \
     --dendron $WORKSPACE/dendron/bin/dendron \
-    --synchrotron \
     --pusher \
     --port-base $PORT_BASE
--
cgit 1.4.1
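The two `PublicRoomList` commits above move the servlet onto the standard federation path: `BaseFederationServlet` authenticates the remote homeserver and then invokes the handler with `(origin, content, query)` rather than the raw request, which is why the `_wrap` override had to go and the `on_GET` signature had to change. A sketch of that calling convention (the servlet and handler names are hypothetical):

    from twisted.internet import defer


    class ExampleFederationServlet(object):
        PATH = "/example"

        def __init__(self, handler):
            self.handler = handler

        @defer.inlineCallbacks
        def on_GET(self, origin, content, query):
            # `origin` is the authenticated remote server name, `content`
            # the parsed body (None for a GET), `query` the query-string
            # parameters.
            data = yield self.handler.get_data_for(origin)
            defer.returnValue((200, data))
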
From 81c07a32fd27260b5112dcc87845a9e87fa5db58 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 15:43:37 +0100
Subject: Pull full state for each room all at once

---
 synapse/handlers/room.py | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 9fd34588dd..ae44c7a556 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -20,7 +20,7 @@ from ._base import BaseHandler

 from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken
 from synapse.api.constants import (
-    EventTypes, JoinRules, RoomCreationPreset,
+    EventTypes, JoinRules, RoomCreationPreset, Membership,
 )
 from synapse.api.errors import AuthError, StoreError, SynapseError
 from synapse.util import stringutils
@@ -367,14 +367,10 @@ class RoomListHandler(BaseHandler):

         @defer.inlineCallbacks
         def handle_room(room_id):
-            # We pull each bit of state out indvidually to avoid pulling the
-            # full state into memory. Due to how the caching works this should
-            # be fairly quick, even if not originally in the cache.
-            def get_state(etype, state_key):
-                return self.state_handler.get_current_state(room_id, etype, state_key)
+            current_state = yield self.state_handler.get_current_state(room_id)

             # Double check that this is actually a public room.
-            join_rules_event = yield get_state(EventTypes.JoinRules, "")
+            join_rules_event = current_state.get((EventTypes.JoinRules, ""))
             if join_rules_event:
                 join_rule = join_rules_event.content.get("join_rule", None)
                 if join_rule and join_rule != JoinRules.PUBLIC:
@@ -382,47 +378,51 @@ class RoomListHandler(BaseHandler):

             result = {"room_id": room_id}

-            joined_users = yield self.store.get_users_in_room(room_id)
-            if len(joined_users) == 0:
+            num_joined_users = len([
+                1 for _, event in current_state.items()
+                if event.type == EventTypes.Member
+                and event.membership == Membership.JOIN
+            ])
+            if num_joined_users == 0:
                 return

-            result["num_joined_members"] = len(joined_users)
+            result["num_joined_members"] = num_joined_users

             aliases = yield self.store.get_aliases_for_room(room_id)
             if aliases:
                 result["aliases"] = aliases

-            name_event = yield get_state(EventTypes.Name, "")
+            name_event = current_state.get((EventTypes.Name, ""))
             if name_event:
                 name = name_event.content.get("name", None)
                 if name:
                     result["name"] = name

-            topic_event = yield get_state(EventTypes.Topic, "")
+            topic_event = current_state.get((EventTypes.Topic, ""))
             if topic_event:
                 topic = topic_event.content.get("topic", None)
                 if topic:
                     result["topic"] = topic

-            canonical_event = yield get_state(EventTypes.CanonicalAlias, "")
+            canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
             if canonical_event:
                 canonical_alias = canonical_event.content.get("alias", None)
                 if canonical_alias:
                     result["canonical_alias"] = canonical_alias

-            visibility_event = yield get_state(EventTypes.RoomHistoryVisibility, "")
+            visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
             visibility = None
             if visibility_event:
                 visibility = visibility_event.content.get("history_visibility", None)
             result["world_readable"] = visibility == "world_readable"

-            guest_event = yield get_state(EventTypes.GuestAccess, "")
+            guest_event = current_state.get((EventTypes.GuestAccess, ""))
             guest = None
             if guest_event:
                 guest = guest_event.content.get("guest_access", None)
             result["guest_can_join"] = guest == "can_join"

-            avatar_event = yield get_state("m.room.avatar", "")
+            avatar_event = current_state.get(("m.room.avatar", ""))
             if avatar_event:
                 avatar_url = avatar_event.content.get("url", None)
                 if avatar_url:
--
cgit 1.4.1
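The room-list rewrite above trades one state-store round trip per field for a single `get_current_state` call; every field then becomes a plain dict lookup keyed by `(event_type, state_key)`. A sketch with a hand-built state map (the event types are real Matrix ones, the events are made up):

    from collections import namedtuple

    Event = namedtuple("Event", ["type", "content"])

    # Shape of the map get_current_state conceptually returns:
    # one entry per (event_type, state_key) pair.
    current_state = {
        ("m.room.name", ""): Event("m.room.name", {"name": "Example"}),
        ("m.room.topic", ""): Event("m.room.topic", {"topic": "A demo room"}),
    }

    result = {}
    name_event = current_state.get(("m.room.name", ""))
    if name_event:
        name = name_event.content.get("name", None)
        if name:
            result["name"] = name
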
current_state.get(("m.room.avatar", "")) if avatar_event: avatar_url = avatar_event.content.get("url", None) if avatar_url: -- cgit 1.4.1 From 6e7dc7c7dde377794c23d5db6f25ffacfb08e82a Mon Sep 17 00:00:00 2001 From: Negar Fazeli Date: Wed, 8 Jun 2016 19:16:46 +0200 Subject: Fix a bug caused by a change in auth_handler function Fix the relevant unit test cases --- synapse/handlers/register.py | 4 ++-- tests/handlers/test_register.py | 9 +++------ 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index bbc07b045e..e0aaefe7be 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -388,8 +388,8 @@ class RegistrationHandler(BaseHandler): user = UserID(localpart, self.hs.hostname) user_id = user.to_string() - auth_handler = self.hs.get_handlers().auth_handler - token = auth_handler.generate_short_term_login_token(user_id, duration_seconds) + token = self.auth_handler().generate_short_term_login_token( + user_id, duration_seconds) if need_register: yield self.store.register( diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 9d5c653b45..69a5e5b1d4 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -41,14 +41,15 @@ class RegistrationTestCase(unittest.TestCase): handlers=None, http_client=None, expire_access_token=True) + self.auth_handler = Mock( + generate_short_term_login_token=Mock(return_value='secret')) self.hs.handlers = RegistrationHandlers(self.hs) self.handler = self.hs.get_handlers().registration_handler self.hs.get_handlers().profile_handler = Mock() self.mock_handler = Mock(spec=[ "generate_short_term_login_token", ]) - - self.hs.get_handlers().auth_handler = self.mock_handler + self.hs.get_auth_handler = Mock(return_value=self.auth_handler) @defer.inlineCallbacks def test_user_is_created_and_logged_in_if_doesnt_exist(self): @@ -56,8 +57,6 @@ class RegistrationTestCase(unittest.TestCase): local_part = "someone" display_name = "someone" user_id = "@someone:test" - mock_token = self.mock_handler.generate_short_term_login_token - mock_token.return_value = 'secret' result_user_id, result_token = yield self.handler.get_or_create_user( local_part, display_name, duration_ms) self.assertEquals(result_user_id, user_id) @@ -75,8 +74,6 @@ class RegistrationTestCase(unittest.TestCase): local_part = "frank" display_name = "Frank" user_id = "@frank:test" - mock_token = self.mock_handler.generate_short_term_login_token - mock_token.return_value = 'secret' result_user_id, result_token = yield self.handler.get_or_create_user( local_part, display_name, duration_ms) self.assertEquals(result_user_id, user_id) -- cgit 1.4.1 From 95f305c35a790e8f10fef7e16268dfaba6bc4c31 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Jun 2016 10:57:11 +0100 Subject: Remove redundant exception log in /events --- synapse/rest/client/v1/events.py | 45 +++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 24 deletions(-) diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index d1afa0f0d5..498bb9e18a 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -45,30 +45,27 @@ class EventStreamRestServlet(ClientV1RestServlet): raise SynapseError(400, "Guest users must specify room_id param") if "room_id" in request.args: room_id = request.args["room_id"][0] - try: - handler = self.handlers.event_stream_handler - pagin_config = PaginationConfig.from_request(request) - timeout = 
EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS - if "timeout" in request.args: - try: - timeout = int(request.args["timeout"][0]) - except ValueError: - raise SynapseError(400, "timeout must be in milliseconds.") - - as_client_event = "raw" not in request.args - - chunk = yield handler.get_stream( - requester.user.to_string(), - pagin_config, - timeout=timeout, - as_client_event=as_client_event, - affect_presence=(not is_guest), - room_id=room_id, - is_guest=is_guest, - ) - except: - logger.exception("Event stream failed") - raise + + handler = self.handlers.event_stream_handler + pagin_config = PaginationConfig.from_request(request) + timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS + if "timeout" in request.args: + try: + timeout = int(request.args["timeout"][0]) + except ValueError: + raise SynapseError(400, "timeout must be in milliseconds.") + + as_client_event = "raw" not in request.args + + chunk = yield handler.get_stream( + requester.user.to_string(), + pagin_config, + timeout=timeout, + as_client_event=as_client_event, + affect_presence=(not is_guest), + room_id=room_id, + is_guest=is_guest, + ) defer.returnValue((200, chunk)) -- cgit 1.4.1 From eba4ff1bcbd99ae5b23f7cdae2306662319d3b4a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Jun 2016 11:29:43 +0100 Subject: 502 on /thumbnail when can't contact remote server --- synapse/rest/media/v1/media_repository.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index d96bf9afe2..2468c3ac42 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -26,6 +26,7 @@ from .thumbnailer import Thumbnailer from synapse.http.matrixfederationclient import MatrixFederationHttpClient from synapse.util.stringutils import random_string +from synapse.api.errors import SynapseError from twisted.internet import defer, threads @@ -134,10 +135,15 @@ class MediaRepository(object): request_path = "/".join(( "/_matrix/media/v1/download", server_name, media_id, )) - length, headers = yield self.client.get_file( - server_name, request_path, output_stream=f, - max_size=self.max_upload_size, - ) + try: + length, headers = yield self.client.get_file( + server_name, request_path, output_stream=f, + max_size=self.max_upload_size, + ) + except Exception as e: + logger.warn("Failed to fetch remoted media %r", e) + raise SynapseError(502, "Failed to fetch remoted media") + media_type = headers["Content-Type"][0] time_now_ms = self.clock.time_msec() -- cgit 1.4.1 From 7dbb473339bc41daf6c05b64756f97e011f653f5 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 9 Jun 2016 18:50:38 +0100 Subject: Add function to load config without generating it Renames ``load_config`` to ``load_or_generate_config`` Adds a method called ``load_config`` that just loads the config. The main synapse.app.homeserver will continue to use ``load_or_generate_config`` to retain backwards compat. However new worker processes can use ``load_config`` to load the config avoiding some of the cruft needed to generate the config. 
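Returning 502 rather than a generic 500 for a failed remote fetch matters to clients: Bad Gateway says the upstream homeserver was unreachable, not that the local one is broken. A reduced sketch of the wrap-and-rethrow (the error class is a stand-in for SynapseError):

    import logging

    logger = logging.getLogger(__name__)


    class GatewayError(Exception):
        # Stand-in for SynapseError(502, msg).
        def __init__(self, code, msg):
            super(GatewayError, self).__init__(msg)
            self.code = code


    def fetch_remote_media(fetch):
        try:
            return fetch()
        except Exception as e:
            # Deliberately broad: DNS failure, TLS error or timeout all mean
            # the same thing to the caller -- the remote server failed.
            logger.warn("Failed to fetch remote media %r", e)
            raise GatewayError(502, "Failed to fetch remote media")
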
From 7dbb473339bc41daf6c05b64756f97e011f653f5 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 9 Jun 2016 18:50:38 +0100
Subject: Add function to load config without generating it

Renames ``load_config`` to ``load_or_generate_config``

Adds a method called ``load_config`` that just loads the config.

The main synapse.app.homeserver will continue to use
``load_or_generate_config`` to retain backwards compat. However new
worker processes can use ``load_config`` to load the config, avoiding
some of the cruft needed to generate the config.

As the new ``load_config`` method is expected to be used by new worker
processes, it drops support for the legacy commandline overrides that
``load_or_generate_config`` supports.
---
 synapse/app/homeserver.py     |   3 +-
 synapse/config/_base.py       | 147 ++++++++++++++++++++++++++++++------------
 tests/config/test_generate.py |   2 +-
 tests/config/test_load.py     |  22 ++++++-
 4 files changed, 126 insertions(+), 48 deletions(-)

diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 22e1721fc4..40ffd9bf0d 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -266,10 +266,9 @@ def setup(config_options):
         HomeServer
     """
     try:
-        config = HomeServerConfig.load_config(
+        config = HomeServerConfig.load_or_generate_config(
             "Synapse Homeserver",
             config_options,
-            generate_section="Homeserver"
         )
     except ConfigError as e:
         sys.stderr.write("\n" + e.message + "\n")
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 7449f36491..af9f17bf7b 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -157,9 +157,40 @@ class Config(object):
         return default_config, config

     @classmethod
-    def load_config(cls, description, argv, generate_section=None):
+    def load_config(cls, description, argv):
+        config_parser = argparse.ArgumentParser(
+            description=description,
+        )
+        config_parser.add_argument(
+            "-c", "--config-path",
+            action="append",
+            metavar="CONFIG_FILE",
+            help="Specify config file. Can be given multiple times and"
+                 " may specify directories containing *.yaml files."
+        )
+
+        config_parser.add_argument(
+            "--keys-directory",
+            metavar="DIRECTORY",
+            help="Where files such as certs and signing keys are stored when"
+                 " their location is given explicitly in the config."
+                 " Defaults to the directory containing the last config file",
+        )
+
+        config_args = config_parser.parse_args(argv)
+
+        config_files = find_config_files(search_paths=config_args.config_path)
+
+        obj = cls()
+        obj.read_config_files(
+            config_files,
+            keys_directory=config_args.keys_directory,
+            generate_keys=False,
+        )
+        return obj
+
+    @classmethod
+    def load_or_generate_config(cls, description, argv):
         config_parser = argparse.ArgumentParser(add_help=False)
         config_parser.add_argument(
             "-c", "--config-path",
@@ -176,7 +207,7 @@ class Config(object):
         config_parser.add_argument(
             "--report-stats",
             action="store",
-            help="Stuff",
+            help="Whether the generated config reports anonymized usage statistics",
            choices=["yes", "no"]
         )
         config_parser.add_argument(
@@ -197,36 +228,11 @@ class Config(object):
         )

         config_args, remaining_args = config_parser.parse_known_args(argv)

+        config_files = find_config_files(search_paths=config_args.config_path)
+
         generate_keys = config_args.generate_keys

-        config_files = []
-        if config_args.config_path:
-            for config_path in config_args.config_path:
-                if os.path.isdir(config_path):
-                    # We accept specifying directories as config paths, we search
-                    # inside that directory for all files matching *.yaml, and then
-                    # we apply them in *sorted* order.
-                    files = []
-                    for entry in os.listdir(config_path):
-                        entry_path = os.path.join(config_path, entry)
-                        if not os.path.isfile(entry_path):
-                            print (
-                                "Found subdirectory in config directory: %r. IGNORING."
-                            ) % (entry_path, )
-                            continue
-
-                        if not entry.endswith(".yaml"):
-                            print (
-                                "Found file in config directory that does not"
-                                " end in '.yaml': %r. IGNORING."
-                            ) % (entry_path, )
-                            continue
-
-                        files.append(entry_path)
-
-                    config_files.extend(sorted(files))
-                else:
-                    config_files.append(config_path)
+        obj = cls()

         if config_args.generate_config:
             if config_args.report_stats is None:
@@ -299,28 +305,43 @@ class Config(object):
                 " -c CONFIG-FILE\""
             )

-        if config_args.keys_directory:
-            config_dir_path = config_args.keys_directory
-        else:
-            config_dir_path = os.path.dirname(config_args.config_path[-1])
-        config_dir_path = os.path.abspath(config_dir_path)
+        obj.read_config_files(
+            config_files,
+            keys_directory=config_args.keys_directory,
+            generate_keys=generate_keys,
+        )
+
+        if generate_keys:
+            return None
+
+        obj.invoke_all("read_arguments", args)
+
+        return obj
+
+    def read_config_files(self, config_files, keys_directory=None,
+                          generate_keys=False):
+        if not keys_directory:
+            keys_directory = os.path.dirname(config_files[-1])
+
+        config_dir_path = os.path.abspath(keys_directory)

         specified_config = {}
         for config_file in config_files:
-            yaml_config = cls.read_config_file(config_file)
+            yaml_config = self.read_config_file(config_file)
             specified_config.update(yaml_config)

         if "server_name" not in specified_config:
             raise ConfigError(MISSING_SERVER_NAME)

         server_name = specified_config["server_name"]
-        _, config = obj.generate_config(
+        _, config = self.generate_config(
             config_dir_path=config_dir_path,
             server_name=server_name,
             is_generating_file=False,
         )
         config.pop("log_config")
         config.update(specified_config)
+
         if "report_stats" not in config:
             raise ConfigError(
                 MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
             )

@@ -328,11 +349,51 @@ class Config(object):
         if generate_keys:
-            obj.invoke_all("generate_files", config)
+            self.invoke_all("generate_files", config)
             return

-        obj.invoke_all("read_config", config)
-
-        obj.invoke_all("read_arguments", args)
-
-        return obj
+        self.invoke_all("read_config", config)
+
+
+def find_config_files(search_paths):
+    """Finds config files using a list of search paths. If a path is a file
+    then that file path is added to the list. If a search path is a directory
+    then all the "*.yaml" files in that directory are added to the list in
+    sorted order.
+
+    Args:
+        search_paths(list(str)): A list of paths to search.
+
+    Returns:
+        list(str): A list of file paths.
+    """
+
+    config_files = []
+    if search_paths:
+        for config_path in search_paths:
+            if os.path.isdir(config_path):
+                # We accept specifying directories as config paths, we search
+                # inside that directory for all files matching *.yaml, and then
+                # we apply them in *sorted* order.
+                files = []
+                for entry in os.listdir(config_path):
+                    entry_path = os.path.join(config_path, entry)
+                    if not os.path.isfile(entry_path):
+                        print (
+                            "Found subdirectory in config directory: %r. IGNORING."
+                        ) % (entry_path, )
+                        continue
+
+                    if not entry.endswith(".yaml"):
+                        print (
+                            "Found file in config directory that does not"
+                            " end in '.yaml': %r. IGNORING."
+                        ) % (entry_path, )
+                        continue
+
+                    files.append(entry_path)
+
+                config_files.extend(sorted(files))
+            else:
+                config_files.append(config_path)
+    return config_files
diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py
index 4329d73974..8f57fbeb23 100644
--- a/tests/config/test_generate.py
+++ b/tests/config/test_generate.py
@@ -30,7 +30,7 @@ class ConfigGenerationTestCase(unittest.TestCase):
         shutil.rmtree(self.dir)

     def test_generate_config_generates_files(self):
-        HomeServerConfig.load_config("", [
+        HomeServerConfig.load_or_generate_config("", [
             "--generate-config",
             "-c", self.file,
             "--report-stats=yes",
diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index bf46233c5c..161a87d7e3 100644
--- a/tests/config/test_load.py
+++ b/tests/config/test_load.py
@@ -34,6 +34,8 @@ class ConfigLoadingTestCase(unittest.TestCase):
         self.generate_config_and_remove_lines_containing("server_name")
         with self.assertRaises(Exception):
             HomeServerConfig.load_config("", ["-c", self.file])
+        with self.assertRaises(Exception):
+            HomeServerConfig.load_or_generate_config("", ["-c", self.file])

     def test_generates_and_loads_macaroon_secret_key(self):
         self.generate_config()
@@ -54,11 +56,24 @@ class ConfigLoadingTestCase(unittest.TestCase):
                 "was: %r" % (config.macaroon_secret_key,)
             )

+        config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
+        self.assertTrue(
+            hasattr(config, "macaroon_secret_key"),
+            "Want config to have attr macaroon_secret_key"
+        )
+        if len(config.macaroon_secret_key) < 5:
+            self.fail(
+                "Want macaroon secret key to be string of at least length 5,"
+                "was: %r" % (config.macaroon_secret_key,)
+            )
+
     def test_load_succeeds_if_macaroon_secret_key_missing(self):
         self.generate_config_and_remove_lines_containing("macaroon")
         config1 = HomeServerConfig.load_config("", ["-c", self.file])
         config2 = HomeServerConfig.load_config("", ["-c", self.file])
+        config3 = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
         self.assertEqual(config1.macaroon_secret_key, config2.macaroon_secret_key)
+        self.assertEqual(config1.macaroon_secret_key, config3.macaroon_secret_key)

     def test_disable_registration(self):
         self.generate_config()
@@ -70,14 +85,17 @@ class ConfigLoadingTestCase(unittest.TestCase):
         config = HomeServerConfig.load_config("", ["-c", self.file])
         self.assertFalse(config.enable_registration)

+        config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
+        self.assertFalse(config.enable_registration)
+
         # Check that either config value is clobbered by the command line.
-        config = HomeServerConfig.load_config("", [
+        config = HomeServerConfig.load_or_generate_config("", [
             "-c", self.file,
             "--enable-registration"
         ])
         self.assertTrue(config.enable_registration)

     def generate_config(self):
-        HomeServerConfig.load_config("", [
+        HomeServerConfig.load_or_generate_config("", [
             "--generate-config",
             "-c", self.file,
             "--report-stats=yes",
--
cgit 1.4.1
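The extracted `find_config_files` gives both loaders the same path semantics: plain files pass through untouched, while directories contribute their `*.yaml` entries in sorted order. A usage sketch (the paths are illustrative):

    from synapse.config._base import find_config_files

    # A main config file plus a conf.d-style directory of overrides;
    # the directory expands to its *.yaml files, applied in sorted order.
    config_files = find_config_files([
        "/etc/synapse/homeserver.yaml",
        "/etc/synapse/conf.d",
    ])
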
From 50f69e2ef266cf08aaff0311705fcf56dc1bd9f3 Mon Sep 17 00:00:00 2001
From: Bartek Rutkowski
Date: Fri, 10 Jun 2016 11:33:43 +0100
Subject: Change /bin/bash to /bin/sh in tox.ini

No features of Bash are used here, so using /bin/sh makes it more
portable to systems that don't have Bash natively (like BSD systems).
---
 tox.ini | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tox.ini b/tox.ini
index 757b7189c3..52d93c65e5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -11,7 +11,7 @@ deps =
 setenv =
     PYTHONDONTWRITEBYTECODE = no_byte_code
 commands =
-    /bin/bash -c "find {toxinidir} -name '*.pyc' -delete ; coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \
+    /bin/sh -c "find {toxinidir} -name '*.pyc' -delete ; coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \
         {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}"
     {env:DUMP_COVERAGE_COMMAND:coverage report -m}
@@ -26,4 +26,4 @@ skip_install = True
 basepython = python2.7
 deps =
     flake8
-commands = /bin/bash -c "flake8 synapse tests {env:PEP8SUFFIX:}"
+commands = /bin/sh -c "flake8 synapse tests {env:PEP8SUFFIX:}"
--
cgit 1.4.1


From fdc015c6e9b023c5cb87491b7e64efd46eedd129 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 10 Jun 2016 16:30:26 +0100
Subject: Enable testing the synchrotron on jenkins

---
 jenkins-dendron-postgres.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh
index d15836e6bf..7e6f24aa7d 100755
--- a/jenkins-dendron-postgres.sh
+++ b/jenkins-dendron-postgres.sh
@@ -80,6 +80,7 @@ echo >&2 "Running sytest with PostgreSQL";
     --synapse-directory $WORKSPACE \
     --dendron $WORKSPACE/dendron/bin/dendron \
     --pusher \
+    --synchrotron \
     --port-base $PORT_BASE

 cd ..
--
cgit 1.4.1


From 36e2aade8790f3f2d86e8f6cc8a6de21e8bec4fa Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 14 Jun 2016 13:25:29 +0100
Subject: Make get_domain_from_id throw SynapseError on invalid ID

---
 synapse/types.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/synapse/types.py b/synapse/types.py
index 7b6ae44bdd..f639651a73 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -22,7 +22,10 @@ Requester = namedtuple("Requester", ["user", "access_token_id", "is_guest"])

 def get_domain_from_id(string):
-    return string.split(":", 1)[1]
+    try:
+        return string.split(":", 1)[1]
+    except IndexError:
+        raise SynapseError(400, "Invalid ID: %r" % (string,))

 class DomainSpecificString(
--
cgit 1.4.1
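The `get_domain_from_id` change works because Matrix identifiers have the form `@localpart:domain` (likewise the `!`, `#` and `$` sigils), so the domain is everything after the first colon — splitting with `maxsplit=1` keeps colons inside the domain (such as ports) intact. A sketch using a generic exception in place of `SynapseError(400, ...)`:

    def get_domain_from_id(string):
        try:
            return string.split(":", 1)[1]
        except IndexError:
            raise ValueError("Invalid ID: %r" % (string,))

    assert get_domain_from_id("@alice:example.com") == "example.com"
    assert get_domain_from_id("!room:example.com:8448") == "example.com:8448"
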
From 255c229f23635f7dc0299de5d54460eea2e2af1c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 15 Jun 2016 10:23:03 +0100
Subject: Work around TLS bug in twisted

Wrap up twisted's FileBodyProducer to work around
https://twistedmatrix.com/trac/ticket/8473. Hopefully this fixes
https://matrix.org/jira/browse/SYN-700.
---
 synapse/http/client.py | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/synapse/http/client.py b/synapse/http/client.py
index c7fa692435..3ec9bc7faf 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -24,12 +24,13 @@ from synapse.http.endpoint import SpiderEndpoint

 from canonicaljson import encode_canonical_json

-from twisted.internet import defer, reactor, ssl, protocol
+from twisted.internet import defer, reactor, ssl, protocol, task
 from twisted.internet.endpoints import SSL4ClientEndpoint, TCP4ClientEndpoint
 from twisted.web.client import (
     BrowserLikeRedirectAgent, ContentDecoderAgent, GzipDecoder, Agent,
-    readBody, FileBodyProducer, PartialDownloadError,
+    readBody, PartialDownloadError,
 )
+from twisted.web.client import FileBodyProducer as TwistedFileBodyProducer
 from twisted.web.http import PotentialDataLoss
 from twisted.web.http_headers import Headers
 from twisted.web._newclient import ResponseDone
@@ -468,3 +469,26 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory):

     def creatorForNetloc(self, hostname, port):
         return self
+
+
+class FileBodyProducer(TwistedFileBodyProducer):
+    """Workaround for https://twistedmatrix.com/trac/ticket/8473
+
+    We override the pauseProducing and resumeProducing methods in twisted's
+    FileBodyProducer so that they do not raise exceptions if the task has
+    already completed.
+    """
+
+    def pauseProducing(self):
+        try:
+            super(FileBodyProducer, self).pauseProducing()
+        except task.TaskDone:
+            # task has already completed
+            pass
+
+    def resumeProducing(self):
+        try:
+            super(FileBodyProducer, self).resumeProducing()
+        except task.NotPaused:
+            # task was not paused (probably because it had already completed)
+            pass
--
cgit 1.4.1


From b31c49d6760b4cdeefc8e0b43d6639be4576e249 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 10:58:07 +0100
Subject: Correctly mark backfilled events as backfilled

---
 synapse/handlers/federation.py | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index ff83c608e7..c2df43e2f6 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -345,19 +345,21 @@ class FederationHandler(BaseHandler):
         )

         missing_auth = required_auth - set(auth_events)
-        results = yield defer.gatherResults(
-            [
-                self.replication_layer.get_pdu(
-                    [dest],
-                    event_id,
-                    outlier=True,
-                    timeout=10000,
-                )
-                for event_id in missing_auth
-            ],
-            consumeErrors=True
-        ).addErrback(unwrapFirstError)
-        auth_events.update({a.event_id: a for a in results})
+        if missing_auth:
+            logger.info("Missing auth for backfill: %r", missing_auth)
+            results = yield defer.gatherResults(
+                [
+                    self.replication_layer.get_pdu(
+                        [dest],
+                        event_id,
+                        outlier=True,
+                        timeout=10000,
+                    )
+                    for event_id in missing_auth
+                ],
+                consumeErrors=True
+            ).addErrback(unwrapFirstError)
+            auth_events.update({a.event_id: a for a in results})

         ev_infos = []
         for a in auth_events.values():
@@ -399,7 +401,7 @@ class FederationHandler(BaseHandler):
             # previous to work out the state.
             # TODO: We can probably do something more clever here.
             yield self._handle_new_event(
-                dest, event
+                dest, event, backfilled=True,
             )

         defer.returnValue(events)
--
cgit 1.4.1


From d41a1a91d3cce28e5416a91b7494d079e4c765f0 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 15:12:59 +0100
Subject: Linearize fetching of gaps on incoming events

This potentially stops the server from doing multiple requests for the
same data.
---
 synapse/federation/federation_base.py   |  3 ++
 synapse/federation/federation_client.py |  2 +
 synapse/federation/federation_server.py | 88 +++++++++++++++++++--------------
 synapse/federation/replication.py       |  2 +
 4 files changed, 58 insertions(+), 37 deletions(-)

diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index a0b7cb7963..da2f5e8cfd 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -31,6 +31,9 @@ logger = logging.getLogger(__name__)

 class FederationBase(object):
+    def __init__(self, hs):
+        pass
+
     @defer.inlineCallbacks
     def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
                                        include_none=False):
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index d835c1b038..b06387051c 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -52,6 +52,8 @@ sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])

 class FederationClient(FederationBase):
+    def __init__(self, hs):
+        super(FederationClient, self).__init__(hs)

     def start_get_pdu_cache(self):
         self._get_pdu_cache = ExpiringCache(
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 9f2a64dede..fe92457ba1 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -19,6 +19,7 @@ from twisted.internet import defer
 from .federation_base import FederationBase
 from .units import Transaction, Edu

+from synapse.util.async import Linearizer
 from synapse.util.logutils import log_function
 from synapse.events import FrozenEvent
 import synapse.metrics
@@ -44,6 +45,11 @@ received_queries_counter = metrics.register_counter("received_queries", labels=[

 class FederationServer(FederationBase):
+    def __init__(self, hs):
+        super(FederationServer, self).__init__(hs)
+
+        self._room_pdu_linearizer = Linearizer()
+
     def set_handler(self, handler):
         """Sets the handler that the replication layer will use to communicate
         receipt of new PDUs from other home servers. The required methods are
@@ -491,43 +497,51 @@ class FederationServer(FederationBase):
                 pdu.internal_metadata.outlier = True
             elif min_depth and pdu.depth > min_depth:
                 if get_missing and prevs - seen:
-                    latest = yield self.store.get_latest_event_ids_in_room(
-                        pdu.room_id
-                    )
-
-                    # We add the prev events that we have seen to the latest
-                    # list to ensure the remote server doesn't give them to us
-                    latest = set(latest)
-                    latest |= seen
-
-                    logger.info(
-                        "Missing %d events for room %r: %r...",
-                        len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
-                    )
-
-                    missing_events = yield self.get_missing_events(
-                        origin,
-                        pdu.room_id,
-                        earliest_events_ids=list(latest),
-                        latest_events=[pdu],
-                        limit=10,
-                        min_depth=min_depth,
-                    )
-
-                    # We want to sort these by depth so we process them and
-                    # tell clients about them in order.
-                    missing_events.sort(key=lambda x: x.depth)
-
-                    for e in missing_events:
-                        yield self._handle_new_pdu(
-                            origin,
-                            e,
-                            get_missing=False
-                        )
-
-                    have_seen = yield self.store.have_events(
-                        [ev for ev, _ in pdu.prev_events]
-                    )
+                    # If we're missing stuff, ensure we only fetch stuff one
+                    # at a time.
+                    with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
+                        # We recalculate seen, since it may have changed.
+                        have_seen = yield self.store.have_events(prevs)
+                        seen = set(have_seen.keys())
+
+                        if prevs - seen:
+                            latest = yield self.store.get_latest_event_ids_in_room(
+                                pdu.room_id
+                            )
+
+                            # We add the prev events that we have seen to the latest
+                            # list to ensure the remote server doesn't give them to us
+                            latest = set(latest)
+                            latest |= seen
+
+                            logger.info(
+                                "Missing %d events for room %r: %r...",
+                                len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
+                            )
+
+                            missing_events = yield self.get_missing_events(
+                                origin,
+                                pdu.room_id,
+                                earliest_events_ids=list(latest),
+                                latest_events=[pdu],
+                                limit=10,
+                                min_depth=min_depth,
+                            )
+
+                            # We want to sort these by depth so we process them and
+                            # tell clients about them in order.
+                            missing_events.sort(key=lambda x: x.depth)
+
+                            for e in missing_events:
+                                yield self._handle_new_pdu(
+                                    origin,
+                                    e,
+                                    get_missing=False
+                                )
+
+                            have_seen = yield self.store.have_events(
+                                [ev for ev, _ in pdu.prev_events]
+                            )

         prevs = {e_id for e_id, _ in pdu.prev_events}
         seen = set(have_seen.keys())
diff --git a/synapse/federation/replication.py b/synapse/federation/replication.py
index 3e062a5eab..ea66a5dcbc 100644
--- a/synapse/federation/replication.py
+++ b/synapse/federation/replication.py
@@ -72,5 +72,7 @@ class ReplicationLayer(FederationClient, FederationServer):

         self.hs = hs

+        super(ReplicationLayer, self).__init__(hs)
+
     def __str__(self):
         return "<ReplicationLayer(%s)>" % self.server_name
--
cgit 1.4.1
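`Linearizer` is the key piece of the gap-fetching commit: it is effectively a keyed lock for inlineCallbacks code, so concurrent requests for the same room queue up behind each other while different rooms proceed in parallel. A usage sketch (assuming only the `queue` API shown in the patch; `do_fetch` is a placeholder for the caller's work):

    from twisted.internet import defer
    from synapse.util.async import Linearizer

    linearizer = Linearizer()


    @defer.inlineCallbacks
    def fetch_gap(room_id, do_fetch):
        # Only one fetch per room runs at a time.
        with (yield linearizer.queue(room_id)):
            # Re-check state first: an earlier holder of the lock may
            # already have fetched the events this caller was missing,
            # which is exactly what the recalculated `seen` above does.
            yield do_fetch(room_id)
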
From 0ef0655b83adee8671358e35c42e2e646ef8d2fd Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 15:50:17 +0100
Subject: Bump version and changelog

---
 CHANGES.rst         | 27 ++++++++++++++++++++++++++-
 synapse/__init__.py |  2 +-
 2 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index 32f18e7098..d5f465792c 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,28 @@
+Changes in synapse v0.16.1-rc1 (2016-06-15)
+===========================================
+
+Features: None
+
+Changes:
+
+* Log requester for ``/publicRoom`` endpoints when possible (PR #856)
+* 502 on ``/thumbnail`` when can't connect to remote server (PR #862)
+* Linearize fetching of gaps on incoming events (PR #871)
+
+
+Bug fixes:
+
+* Fix bug where rooms were marked as published by default (PR #857)
+* Fix bug where joining room with an event with invalid sender (PR #868)
+* Fix bug where backfilled events were sent down sync streams (PR #869)
+* Fix bug where outgoing connections could wedge indefinitely (PR #870)
+
+
+Performance improvements:
+
+* Improve ``/publicRooms`` performance (PR #859)
+
+
 Changes in synapse v0.16.0 (2016-06-09)
 =======================================

@@ -28,7 +53,7 @@ Bug fixes:
 * Fix bug where synapse sent malformed transactions to AS's when retrying
   transactions (Commits 310197b, 8437906)

-Performance Improvements:
+Performance improvements:

 * Remove event fetching from DB threads (PR #835)
 * Change the way we cache events (PR #836)
diff --git a/synapse/__init__.py b/synapse/__init__.py
index dc211e9637..faaa86d972 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """

-__version__ = "0.16.0"
+__version__ = "0.16.1-rc1"
--
cgit 1.4.1


From 0477368e9afb7de9d8f95352f47973e51f0a837c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 16:06:26 +0100
Subject: Update change log

---
 CHANGES.rst | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index d5f465792c..1a47aae857 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -15,7 +15,8 @@ Bug fixes:
 * Fix bug where rooms were marked as published by default (PR #857)
 * Fix bug where joining room with an event with invalid sender (PR #868)
 * Fix bug where backfilled events were sent down sync streams (PR #869)
-* Fix bug where outgoing connections could wedge indefinitely (PR #870)
+* Fix bug where outgoing connections could wedge indefinitely, causing push
+  notifications to be unreliable (PR #870)

 Performance improvements:
--
cgit 1.4.1


From a60169ea0987df41ee540eefbb77cf3ff53446bc Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 16:57:48 +0100
Subject: Handle og props with no content

---
 synapse/rest/media/v1/preview_url_resource.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 37dd1de899..fc72896e0c 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -252,7 +252,8 @@ class PreviewUrlResource(Resource):

         og = {}
         for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
-            og[tag.attrib['property']] = tag.attrib['content']
+            if 'content' in tag.attrib:
+                og[tag.attrib['property']] = tag.attrib['content']

         # TODO: grab article: meta tags too, e.g.:
--
cgit 1.4.1


From 1e9026e484be0f90256ae60c05eed9d1f87cf6b9 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 16:58:05 +0100
Subject: Handle floats as img widths

---
 synapse/rest/media/v1/preview_url_resource.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index fc72896e0c..a6807df620 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -280,7 +280,7 @@ class PreviewUrlResource(Resource):
             # TODO: consider inlined CSS styles as well as width & height attribs
             images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]")
             images = sorted(images, key=lambda i: (
-                -1 * int(i.attrib['width']) * int(i.attrib['height'])
+                -1 * float(i.attrib['width']) * float(i.attrib['height'])
             ))
             if not images:
                 images = tree.xpath("//img[@src]")
--
cgit 1.4.1
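Both preview fixes are defensive-parsing tweaks: an OpenGraph meta tag may lack a `content` attribute entirely, and `width`/`height` attributes may be floats, which `int()` rejects. A sketch of the hardened extraction against a made-up page, using the same lxml calls as the resource above:

    from lxml import etree

    html = (
        '<html><head>'
        '<meta property="og:title" content="Example"/>'
        '<meta property="og:locale"/>'  # no content attribute
        '</head><body>'
        '<img src="a.png" width="100.5" height="50"/>'
        '</body></html>'
    )
    tree = etree.HTML(html)

    og = {}
    for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
        if 'content' in tag.attrib:  # skip valueless meta tags
            og[tag.attrib['property']] = tag.attrib['content']

    images = tree.xpath("//img[@src]")
    # float() accepts "100" as well as "100.5", where int() would raise.
    images.sort(key=lambda i: -float(i.attrib.get('width', 0)) *
                float(i.attrib.get('height', 0)))
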
But for now we just rely on the - # caching on the master request to speed things up. + # FIXME: it might be cleaner to use the same flow as the main /preview_url + # request itself and benefit from the same caching etc. But for now we + # just rely on the caching on the master request to speed things up. if 'og:image' in og and og['og:image']: image_info = yield self._download_url( self._rebase_url(og['og:image'], media_info['uri']), requester.user -- cgit 1.4.1 From ed5f43a55accc8502a60b721871b208db704de3e Mon Sep 17 00:00:00 2001 From: Salvatore LaMendola Date: Thu, 16 Jun 2016 00:43:42 -0400 Subject: Fix TypeError in call to bcrypt.hashpw - At the very least, this TypeError caused logins to fail on my own running instance of Synapse, and the simple (explicit) UTF-8 conversion resolved login errors for me. Signed-off-by: Salvatore LaMendola --- synapse/handlers/auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 200793b5ed..b38f81e999 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -626,6 +626,6 @@ class AuthHandler(BaseHandler): Whether self.hash(password) == stored_hash (bool). """ if stored_hash: - return bcrypt.hashpw(password, stored_hash) == stored_hash + return bcrypt.hashpw(password, stored_hash.encode('utf-8')) == stored_hash else: return False -- cgit 1.4.1 From 885ee861f7270fef1370a2d63e009a8fceaf8dd5 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 16 Jun 2016 11:06:12 +0100 Subject: Inline the synchrotron and pusher configs into the main config --- synapse/app/pusher.py | 171 ++++++++++++------------------------------- synapse/app/synchrotron.py | 137 +++++++++------------------------- synapse/config/homeserver.py | 4 +- synapse/config/logger.py | 102 +++++++++++++------------- synapse/config/server.py | 31 ++++---- 5 files changed, 154 insertions(+), 291 deletions(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 4ec23d84c1..6c8c02fb38 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -18,10 +18,9 @@ import synapse from synapse.server import HomeServer from synapse.config._base import ConfigError -from synapse.config.database import DatabaseConfig -from synapse.config.logger import LoggingConfig -from synapse.config.emailconfig import EmailConfig -from synapse.config.key import KeyConfig +from synapse.config.workers import clobber_with_worker_config +from synapse.config.logger import setup_logging +from synapse.config.homeserver import HomeServerConfig from synapse.http.site import SynapseSite from synapse.metrics.resource import MetricsResource, METRICS_PREFIX from synapse.storage.roommember import RoomMemberStore @@ -43,98 +42,12 @@ from twisted.web.resource import Resource from daemonize import Daemonize -import gc import sys import logging logger = logging.getLogger("synapse.app.pusher") -class SlaveConfig(DatabaseConfig): - def read_config(self, config): - self.replication_url = config["replication_url"] - self.server_name = config["server_name"] - self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get( - "use_insecure_ssl_client_just_for_testing_do_not_use", False - ) - self.user_agent_suffix = None - self.start_pushers = True - self.listeners = config["listeners"] - self.soft_file_limit = config.get("soft_file_limit") - self.daemonize = config.get("daemonize") - self.pid_file = self.abspath(config.get("pid_file")) - self.public_baseurl = config["public_baseurl"] - - thresholds = config.get("gc_thresholds", 
None) - if thresholds is not None: - try: - assert len(thresholds) == 3 - self.gc_thresholds = ( - int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), - ) - except: - raise ConfigError( - "Value of `gc_threshold` must be a list of three integers if set" - ) - else: - self.gc_thresholds = None - - # some things used by the auth handler but not actually used in the - # pusher codebase - self.bcrypt_rounds = None - self.ldap_enabled = None - self.ldap_server = None - self.ldap_port = None - self.ldap_tls = None - self.ldap_search_base = None - self.ldap_search_property = None - self.ldap_email_property = None - self.ldap_full_name_property = None - - # We would otherwise try to use the registration shared secret as the - # macaroon shared secret if there was no macaroon_shared_secret, but - # that means pulling in RegistrationConfig too. We don't need to be - # backwards compaitible in the pusher codebase so just make people set - # macaroon_shared_secret. We set this to None to prevent it referencing - # an undefined key. - self.registration_shared_secret = None - - def default_config(self, server_name, **kwargs): - pid_file = self.abspath("pusher.pid") - return """\ - # Slave configuration - - # The replication listener on the synapse to talk to. - #replication_url: https://localhost:{replication_port}/_synapse/replication - - server_name: "%(server_name)s" - - listeners: [] - # Enable a ssh manhole listener on the pusher. - # - type: manhole - # port: {manhole_port} - # bind_address: 127.0.0.1 - # Enable a metric listener on the pusher. - # - type: http - # port: {metrics_port} - # bind_address: 127.0.0.1 - # resources: - # - names: ["metrics"] - # compress: False - - report_stats: False - - daemonize: False - - pid_file: %(pid_file)s - - """ % locals() - - -class PusherSlaveConfig(SlaveConfig, LoggingConfig, EmailConfig, KeyConfig): - pass - - class PusherSlaveStore( SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore, SlavedAccountDataStore @@ -232,8 +145,8 @@ class PusherServer(HomeServer): ) logger.info("Synapse pusher now listening on port %d", port) - def start_listening(self): - for listener in self.config.listeners: + def start_listening(self, listeners): + for listener in listeners: if listener["type"] == "http": self._listen_http(listener) elif listener["type"] == "manhole": @@ -329,19 +242,32 @@ class PusherServer(HomeServer): yield sleep(30) -def setup(config_options): +def setup(worker_name, config_options): try: - config = PusherSlaveConfig.load_config( + config = HomeServerConfig.load_config( "Synapse pusher", config_options ) except ConfigError as e: sys.stderr.write("\n" + e.message + "\n") sys.exit(1) - if not config: - sys.exit(0) + worker_config = config.workers[worker_name] - config.setup_logging() + setup_logging(worker_config.log_config, worker_config.log_file) + + clobber_with_worker_config(config, worker_config) + + if config.start_pushers: + sys.stderr.write( + "\nThe pushers must be disabled in the main synapse process" + "\nbefore they can be run in a separate worker." 
+ "\nPlease add ``start_pushers: false`` to the main config" + "\n" + ) + sys.exit(1) + + # Force the pushers to start since they will be disabled in the main config + config.start_pushers = True database_engine = create_engine(config.database_config) @@ -354,11 +280,15 @@ def setup(config_options): ) ps.setup() - ps.start_listening() - - change_resource_limit(ps.config.soft_file_limit) - if ps.config.gc_thresholds: - gc.set_threshold(*ps.config.gc_thresholds) + ps.start_listening(worker_config.listeners) + + def run(): + with LoggingContext("run"): + logger.info("Running") + change_resource_limit(worker_config.soft_file_limit) + if worker_config.gc_thresholds: + ps.set_threshold(worker_config.gc_thresholds) + reactor.run() def start(): ps.replicate() @@ -367,30 +297,21 @@ def setup(config_options): reactor.callWhenRunning(start) - return ps + if worker_config.daemonize: + daemon = Daemonize( + app="synapse-pusher", + pid=worker_config.pid_file, + action=run, + auto_close_fds=False, + verbose=True, + logger=logger, + ) + daemon.start() + else: + run() if __name__ == '__main__': with LoggingContext("main"): - ps = setup(sys.argv[1:]) - - if ps.config.daemonize: - def run(): - with LoggingContext("run"): - change_resource_limit(ps.config.soft_file_limit) - if ps.config.gc_thresholds: - gc.set_threshold(*ps.config.gc_thresholds) - reactor.run() - - daemon = Daemonize( - app="synapse-pusher", - pid=ps.config.pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - - daemon.start() - else: - reactor.run() + worker_name = sys.argv[1] + ps = setup(worker_name, sys.argv[2:]) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 297e199453..7a607faef6 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -18,9 +18,9 @@ import synapse from synapse.api.constants import EventTypes, PresenceState from synapse.config._base import ConfigError -from synapse.config.database import DatabaseConfig -from synapse.config.logger import LoggingConfig -from synapse.config.appservice import AppServiceConfig +from synapse.config.homeserver import HomeServerConfig +from synapse.config.logger import setup_logging +from synapse.config.workers import clobber_with_worker_config from synapse.events import FrozenEvent from synapse.handlers.presence import PresenceHandler from synapse.http.site import SynapseSite @@ -57,76 +57,11 @@ from daemonize import Daemonize import sys import logging import contextlib -import gc import ujson as json logger = logging.getLogger("synapse.app.synchrotron") -class SynchrotronConfig(DatabaseConfig, LoggingConfig, AppServiceConfig): - def read_config(self, config): - self.replication_url = config["replication_url"] - self.server_name = config["server_name"] - self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get( - "use_insecure_ssl_client_just_for_testing_do_not_use", False - ) - self.user_agent_suffix = None - self.listeners = config["listeners"] - self.soft_file_limit = config.get("soft_file_limit") - self.daemonize = config.get("daemonize") - self.pid_file = self.abspath(config.get("pid_file")) - self.macaroon_secret_key = config["macaroon_secret_key"] - self.expire_access_token = config.get("expire_access_token", False) - - thresholds = config.get("gc_thresholds", None) - if thresholds is not None: - try: - assert len(thresholds) == 3 - self.gc_thresholds = ( - int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), - ) - except: - raise ConfigError( - "Value of `gc_threshold` must be a list of 
three integers if set" - ) - else: - self.gc_thresholds = None - - def default_config(self, server_name, **kwargs): - pid_file = self.abspath("synchroton.pid") - return """\ - # Slave configuration - - # The replication listener on the synapse to talk to. - #replication_url: https://localhost:{replication_port}/_synapse/replication - - server_name: "%(server_name)s" - - listeners: - # Enable a /sync listener on the synchrontron - #- type: http - # port: {http_port} - # bind_address: "" - # Enable a ssh manhole listener on the synchrotron - # - type: manhole - # port: {manhole_port} - # bind_address: 127.0.0.1 - # Enable a metric listener on the synchrotron - # - type: http - # port: {metrics_port} - # bind_address: 127.0.0.1 - # resources: - # - names: ["metrics"] - # compress: False - - report_stats: False - - daemonize: False - - pid_file: %(pid_file)s - """ % locals() - - class SynchrotronSlavedStore( SlavedPushRuleStore, SlavedEventStore, @@ -350,8 +285,8 @@ class SynchrotronServer(HomeServer): ) logger.info("Synapse synchrotron now listening on port %d", port) - def start_listening(self): - for listener in self.config.listeners: + def start_listening(self, listeners): + for listener in listeners: if listener["type"] == "http": self._listen_http(listener) elif listener["type"] == "manhole": @@ -470,19 +405,20 @@ class SynchrotronServer(HomeServer): return SynchrotronTyping(self) -def setup(config_options): +def start(worker_name, config_options): try: - config = SynchrotronConfig.load_config( + config = HomeServerConfig.load_config( "Synapse synchrotron", config_options ) except ConfigError as e: sys.stderr.write("\n" + e.message + "\n") sys.exit(1) - if not config: - sys.exit(0) + worker_config = config.workers[worker_name] - config.setup_logging() + setup_logging(worker_config.log_config, worker_config.log_file) + + clobber_with_worker_config(config, worker_config) database_engine = create_engine(config.database_config) @@ -496,11 +432,15 @@ def setup(config_options): ) ss.setup() - ss.start_listening() - - change_resource_limit(ss.config.soft_file_limit) - if ss.config.gc_thresholds: - ss.set_threshold(*ss.config.gc_thresholds) + ss.start_listening(worker_config.listeners) + + def run(): + with LoggingContext("run"): + logger.info("Running") + change_resource_limit(worker_config.soft_file_limit) + if worker_config.gc_thresholds: + ss.set_threshold(worker_config.gc_thresholds) + reactor.run() def start(): ss.get_datastore().start_profiling() @@ -508,30 +448,21 @@ def setup(config_options): reactor.callWhenRunning(start) - return ss + if worker_config.daemonize: + daemon = Daemonize( + app="synapse-synchrotron", + pid=worker_config.pid_file, + action=run, + auto_close_fds=False, + verbose=True, + logger=logger, + ) + daemon.start() + else: + run() if __name__ == '__main__': with LoggingContext("main"): - ss = setup(sys.argv[1:]) - - if ss.config.daemonize: - def run(): - with LoggingContext("run"): - change_resource_limit(ss.config.soft_file_limit) - if ss.config.gc_thresholds: - gc.set_threshold(*ss.config.gc_thresholds) - reactor.run() - - daemon = Daemonize( - app="synapse-synchrotron", - pid=ss.config.pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - - daemon.start() - else: - reactor.run() + worker_name = sys.argv[1] + start(worker_name, sys.argv[2:]) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index fc2445484c..79b0534b3b 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -32,13 +32,15 
@@ from .password import PasswordConfig from .jwt import JWTConfig from .ldap import LDAPConfig from .emailconfig import EmailConfig +from .workers import WorkerConfig class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, RatelimitConfig, ContentRepositoryConfig, CaptchaConfig, VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig, AppServiceConfig, KeyConfig, SAML2Config, CasConfig, - JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,): + JWTConfig, LDAPConfig, PasswordConfig, EmailConfig, + WorkerConfig,): pass diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 5047db898f..dc68683fbc 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -126,54 +126,58 @@ class LoggingConfig(Config): ) def setup_logging(self): - log_format = ( - "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" - " - %(message)s" - ) - if self.log_config is None: - - level = logging.INFO - level_for_storage = logging.INFO - if self.verbosity: - level = logging.DEBUG - if self.verbosity > 1: - level_for_storage = logging.DEBUG - - # FIXME: we need a logging.WARN for a -q quiet option - logger = logging.getLogger('') - logger.setLevel(level) - - logging.getLogger('synapse.storage').setLevel(level_for_storage) - - formatter = logging.Formatter(log_format) - if self.log_file: - # TODO: Customisable file size / backup count - handler = logging.handlers.RotatingFileHandler( - self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3 - ) - - def sighup(signum, stack): - logger.info("Closing log file due to SIGHUP") - handler.doRollover() - logger.info("Opened new log file due to SIGHUP") - - # TODO(paul): obviously this is a terrible mechanism for - # stealing SIGHUP, because it means no other part of synapse - # can use it instead. If we want to catch SIGHUP anywhere - # else as well, I'd suggest we find a nicer way to broadcast - # it around. - if getattr(signal, "SIGHUP"): - signal.signal(signal.SIGHUP, sighup) - else: - handler = logging.StreamHandler() - handler.setFormatter(formatter) - - handler.addFilter(LoggingContextFilter(request="")) - - logger.addHandler(handler) + setup_logging(self.log_config, self.log_file, self.verbosity) + + +def setup_logging(log_config=None, log_file=None, verbosity=None): + log_format = ( + "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" + " - %(message)s" + ) + if log_config is None: + + level = logging.INFO + level_for_storage = logging.INFO + if verbosity: + level = logging.DEBUG + if verbosity > 1: + level_for_storage = logging.DEBUG + + # FIXME: we need a logging.WARN for a -q quiet option + logger = logging.getLogger('') + logger.setLevel(level) + + logging.getLogger('synapse.storage').setLevel(level_for_storage) + + formatter = logging.Formatter(log_format) + if log_file: + # TODO: Customisable file size / backup count + handler = logging.handlers.RotatingFileHandler( + log_file, maxBytes=(1000 * 1000 * 100), backupCount=3 + ) + + def sighup(signum, stack): + logger.info("Closing log file due to SIGHUP") + handler.doRollover() + logger.info("Opened new log file due to SIGHUP") + + # TODO(paul): obviously this is a terrible mechanism for + # stealing SIGHUP, because it means no other part of synapse + # can use it instead. If we want to catch SIGHUP anywhere + # else as well, I'd suggest we find a nicer way to broadcast + # it around. 
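The point of the logger.py hunk above is that setup_logging becomes a plain module-level function, so a worker entry point can configure logging from its own settings without constructing a LoggingConfig. A minimal sketch of the intended call, assuming synapse is importable (the log path here is illustrative, not a default):

    from synapse.config.logger import setup_logging

    # With log_config=None and a log_file set, setup_logging installs a
    # size-rotated file handler; with neither set it logs to stderr, and
    # verbosity=None leaves the root logger at INFO, per the code above.
    setup_logging(
        log_config=None,
        log_file="/var/log/synapse/worker.log",  # illustrative path
        verbosity=None,
    )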
+ if getattr(signal, "SIGHUP"): + signal.signal(signal.SIGHUP, sighup) else: - with open(self.log_config, 'r') as f: - logging.config.dictConfig(yaml.load(f)) + handler = logging.StreamHandler() + handler.setFormatter(formatter) + + handler.addFilter(LoggingContextFilter(request="")) + + logger.addHandler(handler) + else: + with open(log_config, 'r') as f: + logging.config.dictConfig(yaml.load(f)) - observer = PythonLoggingObserver() - observer.start() + observer = PythonLoggingObserver() + observer.start() diff --git a/synapse/config/server.py b/synapse/config/server.py index 44b8d422e0..f370b22c32 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -38,19 +38,7 @@ class ServerConfig(Config): self.listeners = config.get("listeners", []) - thresholds = config.get("gc_thresholds", None) - if thresholds is not None: - try: - assert len(thresholds) == 3 - self.gc_thresholds = ( - int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), - ) - except: - raise ConfigError( - "Value of `gc_threshold` must be a list of three integers if set" - ) - else: - self.gc_thresholds = None + self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None)) bind_port = config.get("bind_port") if bind_port: @@ -264,3 +252,20 @@ class ServerConfig(Config): type=int, help="Turn on the twisted telnet manhole" " service on the given port.") + + +def read_gc_thresholds(thresholds): + """Reads the three integer thresholds for garbage collection. Ensures that + the thresholds are integers if thresholds are supplied. + """ + if thresholds is None: + return None + try: + assert len(thresholds) == 3 + return ( + int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), + ) + except: + raise ConfigError( + "Value of `gc_threshold` must be a list of three integers if set" + ) -- cgit 1.4.1 From dbb5a39b64e3b52978ecb98f8f64b7b50acf9b59 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 16 Jun 2016 11:09:15 +0100 Subject: Add worker config module --- synapse/config/workers.py | 71 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 synapse/config/workers.py diff --git a/synapse/config/workers.py b/synapse/config/workers.py new file mode 100644 index 0000000000..fd19e38b87 --- /dev/null +++ b/synapse/config/workers.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 matrix.org +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
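The read_gc_thresholds helper extracted into synapse/config/server.py above keeps the old behaviour: None stays None, any three int-coercible values become a tuple, and anything else becomes a ConfigError. A quick sketch of that contract, assuming synapse is importable:

    from synapse.config.server import read_gc_thresholds

    assert read_gc_thresholds(None) is None                  # unset: leave GC alone
    assert read_gc_thresholds([700, 10, 10]) == (700, 10, 10)
    assert read_gc_thresholds(("1", "2", "3")) == (1, 2, 3)  # values are int()ed
    # read_gc_thresholds([700, 10]) raises ConfigError: exactly three
    # integers are required.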
+ +import collections + +from ._base import Config +from .server import read_gc_thresholds + + +Worker = collections.namedtuple("Worker", [ + "app", + "listeners", + "pid_file", + "daemonize", + "log_file", + "log_config", + "event_cache_size", + "soft_file_limit", + "gc_thresholds", + "replication_url", +]) + + +def clobber_with_worker_config(config, worker_config): + """Overrides some of the keys of the main config with worker-specific + values.""" + config.event_cache_size = worker_config.event_cache_size + config.replication_url = worker_config.replication_url + + +def read_worker_config(config): + return Worker( + app=config["app"], + listeners=config.get("listeners", []), + pid_file=config.get("pid_file"), + daemonize=config["daemonize"], + log_file=config.get("log_file"), + log_config=config.get("log_config"), + event_cache_size=Config.parse_size(config.get("event_cache_size", "10K")), + soft_file_limit=config.get("soft_file_limit"), + gc_thresholds=read_gc_thresholds(config.get("gc_thresholds")), + replication_url=config.get("replication_url"), + ) + + +class WorkerConfig(Config): + """The workers are processes run separately to the main synapse process. + Each worker has a name that identifies it within the config file. + They have their own pid_file and listener configuration. They use the + replication_url to talk to the main synapse process. They have their + own cache size tuning, gc threshold tuning and open file limits.""" + + def read_config(self, config): + workers = config.get("workers", {}) + + self.workers = { + worker_name: read_worker_config(worker_config) + for worker_name, worker_config in workers.items() + } -- cgit 1.4.1 From 80a1bc7db517298baec24c1f11a144552719fb7b Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 16 Jun 2016 11:29:45 +0100 Subject: Comment on what's going on in clobber_with_worker_config --- synapse/config/workers.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/synapse/config/workers.py b/synapse/config/workers.py index fd19e38b87..4f4658c0a8 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -35,8 +35,19 @@ Worker = collections.namedtuple("Worker", [ def clobber_with_worker_config(config, worker_config): """Overrides some of the keys of the main config with worker-specific - values.""" + values. We only need to override the keys that are accessed deep + within synapse code. Most of the keys that we want to override in + the workers are accessed in setup code that is rewritten specifically + for the workers. In that new code we can access the worker config directly, + so we don't need to override the values in the main config.""" + + # TODO: The event_cache_size is accessed in the db setup. It should be + # possible to rejig that code so that the cache size is pulled from the + # worker config directly. + + # TODO: The replication_url should only be accessed within worker specific + # code so it really shouldn't need to be clobbered in the main config.
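For orientation, a sketch of what read_worker_config above produces for one worker stanza; the dict stands in for what the YAML under the top-level workers key would parse to, and every value is illustrative rather than a default ("app" and "daemonize" are the only required keys):

    from synapse.config.workers import read_worker_config

    worker = read_worker_config({
        "app": "synapse.app.synchrotron",
        "daemonize": True,
        "listeners": [{"type": "http", "port": 8083}],
        "pid_file": "/var/run/synapse-synchrotron.pid",
        "replication_url": "https://localhost:9092/_synapse/replication",
        "gc_thresholds": [700, 10, 10],
    })
    assert worker.gc_thresholds == (700, 10, 10)
    # Unset keys fall back as coded above: event_cache_size defaults to
    # Config.parse_size("10K"); log_file, log_config and soft_file_limit
    # default to None.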
config.replication_url = worker_config.replication_url -- cgit 1.4.1 From bde13833cb42fc6e09928ffb4f4efad9244abffa Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 16 Jun 2016 12:44:40 +0100 Subject: Access replication_url from the worker config directly --- synapse/app/pusher.py | 5 +++-- synapse/app/synchrotron.py | 5 +++-- synapse/config/workers.py | 4 ---- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 6c8c02fb38..a26a3bd394 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -112,7 +112,7 @@ class PusherServer(HomeServer): def remove_pusher(self, app_id, push_key, user_id): http_client = self.get_simple_http_client() - replication_url = self.config.replication_url + replication_url = self.worker_config.replication_url url = replication_url + "/remove_pushers" return http_client.post_json_get_json(url, { "remove": [{ @@ -166,7 +166,7 @@ class PusherServer(HomeServer): def replicate(self): http_client = self.get_simple_http_client() store = self.get_datastore() - replication_url = self.config.replication_url + replication_url = self.worker_config.replication_url pusher_pool = self.get_pusherpool() clock = self.get_clock() @@ -275,6 +275,7 @@ def setup(worker_name, config_options): config.server_name, db_config=config.database_config, config=config, + worker_config=worker_config, version_string=get_version_string("Synapse", synapse), database_engine=database_engine, ) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 7a607faef6..4443c73e6a 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -98,7 +98,7 @@ class SynchrotronPresence(object): self.http_client = hs.get_simple_http_client() self.store = hs.get_datastore() self.user_to_num_current_syncs = {} - self.syncing_users_url = hs.config.replication_url + "/syncing_users" + self.syncing_users_url = hs.worker_config.replication_url + "/syncing_users" self.clock = hs.get_clock() active_presence = self.store.take_presence_startup_info() @@ -306,7 +306,7 @@ class SynchrotronServer(HomeServer): def replicate(self): http_client = self.get_simple_http_client() store = self.get_datastore() - replication_url = self.config.replication_url + replication_url = self.worker_config.replication_url clock = self.get_clock() notifier = self.get_notifier() presence_handler = self.get_presence_handler() @@ -426,6 +426,7 @@ def start(worker_name, config_options): config.server_name, db_config=config.database_config, config=config, + worker_config=worker_config, version_string=get_version_string("Synapse", synapse), database_engine=database_engine, application_service_handler=SynchrotronApplicationService(), diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 4f4658c0a8..f2c77ef59a 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -46,10 +46,6 @@ def clobber_with_worker_config(config, worker_config): # worker config directly. config.event_cache_size = worker_config.event_cache_size - # TODO: The replication_url should only be accessed within worker specific - # code so it really shouldn't need to be clobbered in the main config. - config.replication_url = worker_config.replication_url - def read_worker_config(config): return Worker( -- cgit 1.4.1 From 364d6167926d5d8b2a312e3d35623d2e05330e0a Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 16 Jun 2016 12:53:15 +0100 Subject: Access the event_cache_size directly from the server object. 
This means that the workers can override the event_cache_size directly without clobbering the value in the main synapse config. --- synapse/app/pusher.py | 6 +++--- synapse/app/synchrotron.py | 6 +++--- synapse/config/workers.py | 14 -------------- synapse/server.py | 3 +++ synapse/storage/_base.py | 2 +- 5 files changed, 10 insertions(+), 21 deletions(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index a26a3bd394..5d4db4f892 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -18,7 +18,6 @@ import synapse from synapse.server import HomeServer from synapse.config._base import ConfigError -from synapse.config.workers import clobber_with_worker_config from synapse.config.logger import setup_logging from synapse.config.homeserver import HomeServerConfig from synapse.http.site import SynapseSite @@ -241,6 +240,9 @@ class PusherServer(HomeServer): logger.exception("Error replicating from %r", replication_url) yield sleep(30) + def get_event_cache_size(self): + return self.worker_config.event_cache_size + def setup(worker_name, config_options): try: @@ -255,8 +257,6 @@ def setup(worker_name, config_options): setup_logging(worker_config.log_config, worker_config.log_file) - clobber_with_worker_config(config, worker_config) - if config.start_pushers: sys.stderr.write( "\nThe pushers must be disabled in the main synapse process" diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 4443c73e6a..d10bb2b3f0 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -20,7 +20,6 @@ from synapse.api.constants import EventTypes, PresenceState from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging -from synapse.config.workers import clobber_with_worker_config from synapse.events import FrozenEvent from synapse.handlers.presence import PresenceHandler from synapse.http.site import SynapseSite @@ -404,6 +403,9 @@ class SynchrotronServer(HomeServer): def build_typing_handler(self): return SynchrotronTyping(self) + def get_event_cache_size(self): + return self.worker_config.event_cache_size + def start(worker_name, config_options): try: @@ -418,8 +420,6 @@ def start(worker_name, config_options): setup_logging(worker_config.log_config, worker_config.log_file) - clobber_with_worker_config(config, worker_config) - database_engine = create_engine(config.database_config) ss = SynchrotronServer( diff --git a/synapse/config/workers.py b/synapse/config/workers.py index f2c77ef59a..503358e03e 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -33,20 +33,6 @@ Worker = collections.namedtuple("Worker", [ ]) -def clobber_with_worker_config(config, worker_config): - """Overrides some of the keys of the main config with worker-specific - values. We only need to override the keys that are accessed deep - within synapse code. Most of the keys that we want to override in - the workers are accessed in setup code that is rewritten specifically - for the workers. In that new code we can access the worker config directly, - so we don't need to override the values in the main config.""" - - # TODO: The event_cache_size is accessed in the db setup. It should be - # possible to rejig that code so that the cache size is pulled from the # worker config directly.
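The commit message above describes an override pattern: the base HomeServer exposes a getter that reads the main config, and each worker server class overrides that getter to consult the worker config, so nothing needs to be clobbered. Reduced to its shape (a standalone sketch, not the real classes):

    class HomeServer(object):
        def __init__(self, config, worker_config=None):
            self.config = config
            self.worker_config = worker_config

        def get_event_cache_size(self):
            # Main process: read the tuning from the main config.
            return self.config.event_cache_size

    class SynchrotronServer(HomeServer):
        def get_event_cache_size(self):
            # Worker: read the same tuning from the worker config instead,
            # leaving the main config untouched.
            return self.worker_config.event_cache_size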
- config.event_cache_size = worker_config.event_cache_size - - def read_worker_config(config): return Worker( app=config["app"], diff --git a/synapse/server.py b/synapse/server.py index dd4b81c658..b3c31ece73 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -236,6 +236,9 @@ class HomeServer(object): def remove_pusher(self, app_id, push_key, user_id): return self.get_pusherpool().remove_pusher(app_id, push_key, user_id) + def get_event_cache_size(self): + return self.config.event_cache_size + def _make_dependency_method(depname): def _get(hs): diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 32c6677d47..2932880cc5 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -166,7 +166,7 @@ class SQLBaseStore(object): self._get_event_counters = PerformanceCounters() self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True, - max_entries=hs.config.event_cache_size) + max_entries=hs.get_event_cache_size()) self._state_group_cache = DictionaryCache( "*stateGroupCache*", 2000 * CACHE_SIZE_FACTOR -- cgit 1.4.1 From a352b68acf473f59012340b7f481f3dfd6544ac6 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 16 Jun 2016 17:29:50 +0100 Subject: Use worker_ prefixes for worker config, use existing support for multiple config files --- synapse/app/pusher.py | 29 ++++++++++++--------------- synapse/app/synchrotron.py | 29 ++++++++++++--------------- synapse/config/workers.py | 49 ++++++++-------------------------------------- synapse/server.py | 3 --- synapse/storage/_base.py | 2 +- 5 files changed, 33 insertions(+), 79 deletions(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 5d4db4f892..9ac26d52c6 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -111,7 +111,7 @@ class PusherServer(HomeServer): def remove_pusher(self, app_id, push_key, user_id): http_client = self.get_simple_http_client() - replication_url = self.worker_config.replication_url + replication_url = self.config.worker_replication_url url = replication_url + "/remove_pushers" return http_client.post_json_get_json(url, { "remove": [{ @@ -165,7 +165,7 @@ class PusherServer(HomeServer): def replicate(self): http_client = self.get_simple_http_client() store = self.get_datastore() - replication_url = self.worker_config.replication_url + replication_url = self.config.worker_replication_url pusher_pool = self.get_pusherpool() clock = self.get_clock() @@ -240,11 +240,8 @@ class PusherServer(HomeServer): logger.exception("Error replicating from %r", replication_url) yield sleep(30) - def get_event_cache_size(self): - return self.worker_config.event_cache_size - -def setup(worker_name, config_options): +def start(config_options): try: config = HomeServerConfig.load_config( "Synapse pusher", config_options @@ -253,9 +250,9 @@ def setup(worker_name, config_options): sys.stderr.write("\n" + e.message + "\n") sys.exit(1) - worker_config = config.workers[worker_name] + assert config.worker_app == "synapse.app.pusher" - setup_logging(worker_config.log_config, worker_config.log_file) + setup_logging(config.worker_log_config, config.worker_log_file) if config.start_pushers: sys.stderr.write( @@ -275,20 +272,19 @@ def setup(worker_name, config_options): config.server_name, db_config=config.database_config, config=config, - worker_config=worker_config, version_string=get_version_string("Synapse", synapse), database_engine=database_engine, ) ps.setup() - ps.start_listening(worker_config.listeners) + ps.start_listening(config.worker_listeners) def run(): with 
LoggingContext("run"): logger.info("Running") - change_resource_limit(worker_config.soft_file_limit) - if worker_config.gc_thresholds: - ps.set_threshold(worker_config.gc_thresholds) + change_resource_limit(config.soft_file_limit) + if config.gc_thresholds: + ps.set_threshold(config.gc_thresholds) reactor.run() def start(): @@ -298,10 +294,10 @@ def setup(worker_name, config_options): reactor.callWhenRunning(start) - if worker_config.daemonize: + if config.worker_daemonize: daemon = Daemonize( app="synapse-pusher", - pid=worker_config.pid_file, + pid=config.worker_pid_file, action=run, auto_close_fds=False, verbose=True, @@ -314,5 +310,4 @@ def setup(worker_name, config_options): if __name__ == '__main__': with LoggingContext("main"): - worker_name = sys.argv[1] - ps = setup(worker_name, sys.argv[2:]) + ps = start(sys.argv[1:]) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index d10bb2b3f0..160db8637e 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -97,7 +97,7 @@ class SynchrotronPresence(object): self.http_client = hs.get_simple_http_client() self.store = hs.get_datastore() self.user_to_num_current_syncs = {} - self.syncing_users_url = hs.worker_config.replication_url + "/syncing_users" + self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users" self.clock = hs.get_clock() active_presence = self.store.take_presence_startup_info() @@ -305,7 +305,7 @@ class SynchrotronServer(HomeServer): def replicate(self): http_client = self.get_simple_http_client() store = self.get_datastore() - replication_url = self.worker_config.replication_url + replication_url = self.config.worker_replication_url clock = self.get_clock() notifier = self.get_notifier() presence_handler = self.get_presence_handler() @@ -403,11 +403,8 @@ class SynchrotronServer(HomeServer): def build_typing_handler(self): return SynchrotronTyping(self) - def get_event_cache_size(self): - return self.worker_config.event_cache_size - -def start(worker_name, config_options): +def start(config_options): try: config = HomeServerConfig.load_config( "Synapse synchrotron", config_options @@ -416,9 +413,9 @@ def start(worker_name, config_options): sys.stderr.write("\n" + e.message + "\n") sys.exit(1) - worker_config = config.workers[worker_name] + assert config.worker_app == "synapse.app.synchrotron" - setup_logging(worker_config.log_config, worker_config.log_file) + setup_logging(config.worker_log_config, config.worker_log_file) database_engine = create_engine(config.database_config) @@ -426,21 +423,20 @@ def start(worker_name, config_options): config.server_name, db_config=config.database_config, config=config, - worker_config=worker_config, version_string=get_version_string("Synapse", synapse), database_engine=database_engine, application_service_handler=SynchrotronApplicationService(), ) ss.setup() - ss.start_listening(worker_config.listeners) + ss.start_listening(config.worker_listeners) def run(): with LoggingContext("run"): logger.info("Running") - change_resource_limit(worker_config.soft_file_limit) - if worker_config.gc_thresholds: - ss.set_threshold(worker_config.gc_thresholds) + change_resource_limit(config.soft_file_limit) + if config.gc_thresholds: + ss.set_threshold(config.gc_thresholds) reactor.run() def start(): @@ -449,10 +445,10 @@ def start(worker_name, config_options): reactor.callWhenRunning(start) - if worker_config.daemonize: + if config.worker_daemonize: daemon = Daemonize( app="synapse-synchrotron", - pid=worker_config.pid_file, + 
pid=config.worker_pid_file, action=run, auto_close_fds=False, verbose=True, @@ -465,5 +461,4 @@ def start(worker_name, config_options): if __name__ == '__main__': with LoggingContext("main"): - worker_name = sys.argv[1] - start(worker_name, sys.argv[2:]) + start(sys.argv[1:]) diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 503358e03e..904789d155 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -13,52 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -import collections - from ._base import Config -from .server import read_gc_thresholds - - -Worker = collections.namedtuple("Worker", [ - "app", - "listeners", - "pid_file", - "daemonize", - "log_file", - "log_config", - "event_cache_size", - "soft_file_limit", - "gc_thresholds", - "replication_url", -]) - - -def read_worker_config(config): - return Worker( - app=config["app"], - listeners=config.get("listeners", []), - pid_file=config.get("pid_file"), - daemonize=config["daemonize"], - log_file=config.get("log_file"), - log_config=config.get("log_config"), - event_cache_size=Config.parse_size(config.get("event_cache_size", "10K")), - soft_file_limit=config.get("soft_file_limit"), - gc_thresholds=read_gc_thresholds(config.get("gc_thresholds")), - replication_url=config.get("replication_url"), - ) class WorkerConfig(Config): """The workers are processes run separately to the main synapse process. - Each worker has a name that identifies it within the config file. They have their own pid_file and listener configuration. They use the - replication_url to talk to the main synapse process. They have their - own cache size tuning, gc threshold tuning and open file limits.""" + replication_url to talk to the main synapse process.""" def read_config(self, config): - workers = config.get("workers", {}) - - self.workers = { - worker_name: read_worker_config(worker_config) - for worker_name, worker_config in workers.items() - } + self.worker_app = config.get("worker_app") + self.worker_listeners = config.get("worker_listeners") + self.worker_daemonize = config.get("worker_daemonize") + self.worker_pid_file = config.get("worker_pid_file") + self.worker_log_file = config.get("worker_log_file") + self.worker_log_config = config.get("worker_log_config") + self.worker_replication_url = config.get("worker_replication_url") diff --git a/synapse/server.py b/synapse/server.py index b3c31ece73..dd4b81c658 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -236,9 +236,6 @@ class HomeServer(object): def remove_pusher(self, app_id, push_key, user_id): return self.get_pusherpool().remove_pusher(app_id, push_key, user_id) - def get_event_cache_size(self): - return self.config.event_cache_size - def _make_dependency_method(depname): def _get(hs): diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 2932880cc5..32c6677d47 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -166,7 +166,7 @@ class SQLBaseStore(object): self._get_event_counters = PerformanceCounters() self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True, - max_entries=hs.get_event_cache_size()) + max_entries=hs.config.event_cache_size) self._state_group_cache = DictionaryCache( "*stateGroupCache*", 2000 * CACHE_SIZE_FACTOR -- cgit 1.4.1 From 8c75040c25495bf29f4c76ca0fcc032975210012 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 17 Jun 2016 11:48:12 +0100 Subject: Fix setting gc thresholds in the workers --- synapse/app/pusher.py | 3 
++- synapse/app/synchrotron.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 9ac26d52c6..4f1d18ab5f 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -43,6 +43,7 @@ from daemonize import Daemonize import sys import logging +import gc logger = logging.getLogger("synapse.app.pusher") @@ -284,7 +285,7 @@ def start(config_options): logger.info("Running") change_resource_limit(config.soft_file_limit) if config.gc_thresholds: - ps.set_threshold(config.gc_thresholds) + gc.set_threshold(*config.gc_thresholds) reactor.run() def start(): diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 160db8637e..8cf5bbbb6d 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -56,6 +56,7 @@ from daemonize import Daemonize import sys import logging import contextlib +import gc import ujson as json logger = logging.getLogger("synapse.app.synchrotron") @@ -436,7 +437,7 @@ def start(config_options): logger.info("Running") change_resource_limit(config.soft_file_limit) if config.gc_thresholds: - ss.set_threshold(config.gc_thresholds) + gc.set_threshold(*config.gc_thresholds) reactor.run() def start(): -- cgit 1.4.1 From ded01c3bf65fd6bb83c9d3546ea44859208e4578 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 17 Jun 2016 13:49:16 +0100 Subject: Fix ``KeyError: 'msgtype'``. Use ``.get`` Fixes a key error where the mailer tried to get the ``msgtype`` of an event that was missing a ``msgtype``. ``` File "synapse/push/mailer.py", line 264, in get_notif_vars File "synapse/push/mailer.py", line 285, in get_message_vars File ".../frozendict/__init__.py", line 10, in __getitem__ return self.__dict[key] KeyError: 'msgtype' ``` --- synapse/push/mailer.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index e5c3929cd7..1028731bc9 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -273,16 +273,16 @@ class Mailer(object): sender_state_event = room_state[("m.room.member", event.sender)] sender_name = name_from_member_event(sender_state_event) - sender_avatar_url = None - if "avatar_url" in sender_state_event.content: - sender_avatar_url = sender_state_event.content["avatar_url"] + sender_avatar_url = sender_state_event.content.get("avatar_url") # 'hash' for deterministically picking default images: use # sender_hash % the number of default images to choose from sender_hash = string_ordinal_total(event.sender) + msgtype = event.content.get("msgtype") + ret = { - "msgtype": event.content["msgtype"], + "msgtype": msgtype, "is_historical": event.event_id != notif['event_id'], "id": event.event_id, "ts": event.origin_server_ts, @@ -291,9 +291,9 @@ class Mailer(object): "sender_hash": sender_hash, } - if event.content["msgtype"] == "m.text": + if msgtype == "m.text": self.add_text_message_vars(ret, event) - elif event.content["msgtype"] == "m.image": + elif msgtype == "m.image": self.add_image_message_vars(ret, event) if "body" in event.content: @@ -302,16 +302,17 @@ class Mailer(object): return ret def add_text_message_vars(self, messagevars, event): - if "format" in event.content: - msgformat = event.content["format"] - else: - msgformat = None + msgformat = event.content.get("format") + messagevars["format"] = msgformat - if msgformat == "org.matrix.custom.html": - messagevars["body_text_html"] = safe_markup(event.content["formatted_body"]) - else: - messagevars["body_text_html"] = 
safe_text(event.content["body"]) + formatted_body = event.content.get("formatted_body") + body = event.content.get("body") + + if msgformat == "org.matrix.custom.html" and formatted_body: + messagevars["body_text_html"] = safe_markup(formatted_body) + elif body: + messagevars["body_text_html"] = safe_text(body) return messagevars -- cgit 1.4.1 From 2884712ca733f45d32468ecf2ede7a1518e85be4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jun 2016 14:47:33 +0100 Subject: Only re-sign our own events --- synapse/federation/federation_server.py | 15 +++++++++------ synapse/handlers/federation.py | 15 +++++++++------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index fe92457ba1..2a589524a4 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -193,13 +193,16 @@ class FederationServer(FederationBase): ) for event in auth_chain: - event.signatures.update( - compute_event_signature( - event, - self.hs.hostname, - self.hs.config.signing_key[0] + # We sign these again because there was a bug where we + # incorrectly signed things the first time round + if self.hs.is_mine_id(event.event_id): + event.signatures.update( + compute_event_signature( + event, + self.hs.hostname, + self.hs.config.signing_key[0] + ) ) - ) else: raise NotImplementedError("Specify an event") diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c2df43e2f6..6c0bc7eafa 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1018,13 +1018,16 @@ class FederationHandler(BaseHandler): res = results.values() for event in res: - event.signatures.update( - compute_event_signature( - event, - self.hs.hostname, - self.hs.config.signing_key[0] + # We sign these again because there was a bug where we + # incorrectly signed things the first time round + if self.hs.is_mine_id(event.event_id): + event.signatures.update( + compute_event_signature( + event, + self.hs.hostname, + self.hs.config.signing_key[0] + ) ) - ) defer.returnValue(res) else: -- cgit 1.4.1 From 3e41de05cc13220f5cd88ae78002adf782728322 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jun 2016 15:11:22 +0100 Subject: Turn use_frozen_events off by default --- synapse/config/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/server.py b/synapse/config/server.py index f370b22c32..7840dc3ad6 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -27,7 +27,7 @@ class ServerConfig(Config): self.daemonize = config.get("daemonize") self.print_pidfile = config.get("print_pidfile") self.user_agent_suffix = config.get("user_agent_suffix") - self.use_frozen_dicts = config.get("use_frozen_dicts", True) + self.use_frozen_dicts = config.get("use_frozen_dicts", False) self.public_baseurl = config.get("public_baseurl") self.secondary_directory_servers = config.get("secondary_directory_servers", []) -- cgit 1.4.1 From 0113ad36ee7bc315aa162c42277b90764825f219 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jun 2016 15:13:13 +0100 Subject: Enable use_frozen_events in tests --- tests/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/utils.py b/tests/utils.py index e19ae581e0..6e41ae1ff6 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -54,6 +54,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): config.trusted_third_party_id_servers = [] 
config.room_invite_state_types = [] + config.use_frozen_dicts = True config.database_config = {"name": "sqlite3"} if "clock" not in kargs: -- cgit 1.4.1 From 120c2387053bdc30824d6b15931532664f739192 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jun 2016 16:10:37 +0100 Subject: Disable responding with canonical json for federation --- synapse/federation/transport/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 6fc3e2207c..8a1965f45a 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -37,7 +37,7 @@ class TransportLayerServer(JsonResource): self.hs = hs self.clock = hs.get_clock() - super(TransportLayerServer, self).__init__(hs) + super(TransportLayerServer, self).__init__(hs, canonical_json=False) self.authenticator = Authenticator(hs) self.ratelimiter = FederationRateLimiter( -- cgit 1.4.1 From 9f1800fba852314332d7e682484e456d28838619 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 17 Jun 2016 19:14:16 +0100 Subject: Remove registered_users from the distributor. The only place it was observed was to set the profile. I've made it so that the profile is set within store.register in the same transaction that creates the user. This required some slight changes to the registration code for upgrading guest users, since it previously relied on the distributor swallowing errors if the profile already existed. --- synapse/handlers/profile.py | 7 ------- synapse/handlers/register.py | 23 ++++++++++------------- synapse/storage/profile.py | 6 ------ synapse/storage/registration.py | 17 ++++++++++++++--- synapse/util/distributor.py | 4 ---- 5 files changed, 24 insertions(+), 33 deletions(-) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index e37409170d..711a6a567f 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -36,13 +36,6 @@ class ProfileHandler(BaseHandler): "profile", self.on_profile_query ) - distributor = hs.get_distributor() - - distributor.observe("registered_user", self.registered_user) - - def registered_user(self, user): - return self.store.create_profile(user.localpart) - @defer.inlineCallbacks def get_displayname(self, target_user): if self.hs.is_mine(target_user): diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index e0aaefe7be..4fb12915dc 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -23,7 +23,6 @@ from synapse.api.errors import ( from ._base import BaseHandler from synapse.util.async import run_on_reactor from synapse.http.client import CaptchaServerHttpClient -from synapse.util.distributor import registered_user import logging import urllib @@ -37,8 +36,6 @@ class RegistrationHandler(BaseHandler): super(RegistrationHandler, self).__init__(hs) self.auth = hs.get_auth() - self.distributor = hs.get_distributor() - self.distributor.declare("registered_user") self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -140,9 +137,10 @@ class RegistrationHandler(BaseHandler): password_hash=password_hash, was_guest=was_guest, make_guest=make_guest, + create_profile_with_localpart=( + None if was_guest else user.localpart + ), ) - - yield registered_user(self.distributor, user) else: # autogen a sequential user ID attempts = 0 @@ -160,7 +158,8 @@ class RegistrationHandler(BaseHandler): user_id=user_id, token=token, password_hash=password_hash, - make_guest=make_guest +
make_guest=make_guest, + create_profile_with_localpart=user.localpart, ) except SynapseError: # if user id is taken, just generate another @@ -168,7 +167,6 @@ user_id = None token = None attempts += 1 - yield registered_user(self.distributor, user) # We used to generate default identicons here, but nowadays # we want clients to generate their own as part of their branding @@ -201,8 +199,8 @@ token=token, password_hash="", appservice_id=service_id, + create_profile_with_localpart=user.localpart, ) - yield registered_user(self.distributor, user) defer.returnValue((user_id, token)) @defer.inlineCallbacks @@ -248,9 +246,9 @@ yield self.store.register( user_id=user_id, token=token, - password_hash=None + password_hash=None, + create_profile_with_localpart=user.localpart, ) - yield registered_user(self.distributor, user) except Exception as e: yield self.store.add_access_token_to_user(user_id, token) # Ignore Registration errors @@ -395,10 +393,9 @@ yield self.store.register( user_id=user_id, token=token, - password_hash=None + password_hash=None, + create_profile_with_localpart=user.localpart, ) - - yield registered_user(self.distributor, user) else: yield self.store.user_delete_access_tokens(user_id=user_id) yield self.store.add_access_token_to_user(user_id=user_id, token=token) diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py index 26a40905ae..c3c3f9ffd8 100644 --- a/synapse/storage/profile.py +++ b/synapse/storage/profile.py @@ -17,12 +17,6 @@ from ._base import SQLBaseStore class ProfileStore(SQLBaseStore): - def create_profile(self, user_localpart): - return self._simple_insert( - table="profiles", - values={"user_id": user_localpart}, - desc="create_profile", - ) def get_profile_displayname(self, user_localpart): return self._simple_select_one_onecol( diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index bda84a744a..3de9e0f709 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -76,7 +76,8 @@ class RegistrationStore(SQLBaseStore): @defer.inlineCallbacks def register(self, user_id, token, password_hash, - was_guest=False, make_guest=False, appservice_id=None): + was_guest=False, make_guest=False, appservice_id=None, + create_profile_with_localpart=None): """Attempts to register an account. Args: @@ -88,6 +89,8 @@ make_guest (boolean): True if the new user should be guest, false to add a regular user account. appservice_id (str): The ID of the appservice registering the user. + create_profile_with_localpart (str): Optionally create a profile for + the given localpart. Raises: StoreError if the user_id could not be registered.
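The net effect of the hunks above is that profile creation joins the registration transaction instead of riding on a distributor signal. A hedged sketch of a call site after this change; the helper name and its shape are hypothetical, and only store.register's signature comes from the diff:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def register_with_profile(store, user_id, localpart, token, password_hash):
        # `store` is a RegistrationStore; create_profile_with_localpart is
        # the new keyword added in this commit.
        yield store.register(
            user_id=user_id,
            token=token,
            password_hash=password_hash,
            # The profiles row is inserted in the same transaction that
            # creates the user. Pass None when upgrading a guest, whose
            # profile row already exists.
            create_profile_with_localpart=localpart,
        )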
""" @@ -99,7 +102,8 @@ class RegistrationStore(SQLBaseStore): password_hash, was_guest, make_guest, - appservice_id + appservice_id, + create_profile_with_localpart, ) self.get_user_by_id.invalidate((user_id,)) self.is_guest.invalidate((user_id,)) @@ -112,7 +116,8 @@ class RegistrationStore(SQLBaseStore): password_hash, was_guest, make_guest, - appservice_id + appservice_id, + create_profile_with_localpart, ): now = int(self.clock.time()) @@ -157,6 +162,12 @@ class RegistrationStore(SQLBaseStore): (next_id, user_id, token,) ) + if create_profile_with_localpart: + txn.execute( + "INSERT INTO profiles(user_id) VALUES (?)", + (create_profile_with_localpart,) + ) + @cached() def get_user_by_id(self, user_id): return self._simple_select_one( diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index d7cccc06b1..e68f94ce77 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -27,10 +27,6 @@ import logging logger = logging.getLogger(__name__) -def registered_user(distributor, user): - return distributor.fire("registered_user", user) - - def user_left_room(distributor, user, room_id): return preserve_context_over_fn( distributor.fire, -- cgit 1.4.1 From 0c13d45522c5c8c0b68322498a220969eb894ad5 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 17 Jun 2016 19:18:53 +0100 Subject: Add a comment on why we don't create a profile for upgrading users --- synapse/handlers/register.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 4fb12915dc..0b7517221d 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -138,6 +138,7 @@ class RegistrationHandler(BaseHandler): was_guest=was_guest, make_guest=make_guest, create_profile_with_localpart=( + # If the user was a guest then they already have a profile None if was_guest else user.localpart ), ) -- cgit 1.4.1 From 41e4b2efeafa6e2f4cbfef4f30620b9f58b020a4 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 17 Jun 2016 19:20:47 +0100 Subject: Add the create_profile method back since the tests use it --- synapse/storage/profile.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py index c3c3f9ffd8..26a40905ae 100644 --- a/synapse/storage/profile.py +++ b/synapse/storage/profile.py @@ -17,6 +17,12 @@ from ._base import SQLBaseStore class ProfileStore(SQLBaseStore): + def create_profile(self, user_localpart): + return self._simple_insert( + table="profiles", + values={"user_id": user_localpart}, + desc="create_profile", + ) def get_profile_displayname(self, user_localpart): return self._simple_select_one_onecol( -- cgit 1.4.1 From 4d362a61ea3173f1be0ac58147db29acfbe1b4c3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Jun 2016 14:17:15 +0100 Subject: Bump version and changelog --- CHANGES.rst | 15 +++++++++++++++ synapse/__init__.py | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 1a47aae857..ecaaa189d0 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,18 @@ +Changes in synapse v0.16.1 (2016-06-20) +======================================= + +Bug fixes: + +* Fix assorted bugs in ``/preview_url`` (PR #872) +* Fix TypeError when setting unicode passwords (PR #873) + + +Performance improvements: + +* Turn ``use_frozen_events`` off by default (PR #877) +* Disable responding with canonical json for federation (PR #878) + + Changes in synapse v0.16.1-rc1 (2016-06-15) 
=========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index faaa86d972..3cd79b1247 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.16.1-rc1" +__version__ = "0.16.1" -- cgit 1.4.1
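Taken together, the worker commits in this series land on a simple deployment shape: a worker reuses HomeServerConfig, reading worker_-prefixed keys that are typically supplied via an extra config file merged with the main one. A sketch of such a file's contents, shown here as the dict the YAML would parse to; all values are examples rather than defaults:

    # Example worker settings (as parsed from a hypothetical worker YAML file).
    worker_settings = {
        "worker_app": "synapse.app.synchrotron",
        "worker_listeners": [{"type": "http", "port": 8083}],
        "worker_daemonize": True,
        "worker_pid_file": "/var/run/synapse-synchrotron.pid",
        "worker_log_file": "/var/log/synapse/synchrotron.log",
        "worker_replication_url": "https://localhost:9092/_synapse/replication",
    }

    # WorkerConfig.read_config copies these keys straight onto the config
    # object, and synapse.app.synchrotron asserts that config.worker_app
    # names it before starting up.

The worker would then be started with both the main and the worker config files, relying on load_config's existing support for multiple config files; the exact command-line invocation is an assumption, as this series does not show it.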