diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index 704181d2d3..594754cfd8 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -167,4 +167,4 @@ class BaseHandler(object):
ratelimit=False,
)
except Exception as e:
- logger.warn("Error kicking guest user: %s" % (e,))
+ logger.exception("Error kicking guest user: %s" % (e,))
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 8955cde4ed..c708c35d4d 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -20,7 +20,11 @@ from twisted.internet import defer
from synapse.api import errors
from synapse.api.constants import EventTypes
-from synapse.api.errors import FederationDeniedError
+from synapse.api.errors import (
+ FederationDeniedError,
+ HttpResponseException,
+ RequestSendFailed,
+)
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
@@ -504,13 +508,13 @@ class DeviceListEduUpdater(object):
origin = get_domain_from_id(user_id)
try:
result = yield self.federation.query_user_devices(origin, user_id)
- except NotRetryingDestination:
+ except (
+ NotRetryingDestination, RequestSendFailed, HttpResponseException,
+ ):
# TODO: Remember that we are now out of sync and try again
# later
logger.warn(
- "Failed to handle device list update for %s,"
- " we're not retrying the remote",
- user_id,
+ "Failed to handle device list update for %s", user_id,
)
# We abort on exceptions rather than accepting the update
# as otherwise synapse will 'forget' that its device list
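Catching RequestSendFailed and HttpResponseException alongside
NotRetryingDestination means every "could not reach the remote, or it answered
badly" case is handled the same way: log, drop the update, and let a later
resync repair the device list. A rough sketch of that pattern, with builtin
exceptions standing in for the federation errors:

    def apply_remote_update(fetch_remote, process, user_id):
        # Sketch only: ConnectionError/TimeoutError stand in for
        # NotRetryingDestination, RequestSendFailed and
        # HttpResponseException.
        try:
            result = fetch_remote(user_id)
        except (ConnectionError, TimeoutError):
            # Abort rather than apply a partial update; accepting it
            # would make us wrongly believe we are in sync.
            return None
        return process(result)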
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 6bb254f899..8b113307d2 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -112,7 +112,9 @@ class DirectoryHandler(BaseHandler):
403, "This user is not permitted to create this alias",
)
- if not self.config.is_alias_creation_allowed(user_id, room_alias.to_string()):
+ if not self.config.is_alias_creation_allowed(
+ user_id, room_id, room_alias.to_string(),
+ ):
# Lets just return a generic message, as there may be all sorts of
# reasons why we said no. TODO: Allow configurable error messages
# per alias creation rule?
@@ -395,9 +397,9 @@ class DirectoryHandler(BaseHandler):
room_id (str)
visibility (str): "public" or "private"
"""
- if not self.spam_checker.user_may_publish_room(
- requester.user.to_string(), room_id
- ):
+ user_id = requester.user.to_string()
+
+ if not self.spam_checker.user_may_publish_room(user_id, room_id):
raise AuthError(
403,
"This user is not permitted to publish rooms to the room list"
@@ -415,7 +417,24 @@ class DirectoryHandler(BaseHandler):
yield self.auth.check_can_change_room_list(room_id, requester.user)
- yield self.store.set_room_is_public(room_id, visibility == "public")
+ making_public = visibility == "public"
+ if making_public:
+ room_aliases = yield self.store.get_aliases_for_room(room_id)
+ canonical_alias = yield self.store.get_canonical_alias_for_room(room_id)
+ if canonical_alias:
+ room_aliases.append(canonical_alias)
+
+ if not self.config.is_publishing_room_allowed(
+ user_id, room_id, room_aliases,
+ ):
+ # Let's just return a generic message, as there may be all sorts of
+ # reasons why we said no. TODO: Allow configurable error messages
+ # per room publishing rule?
+ raise SynapseError(
+ 403, "Not allowed to publish room",
+ )
+
+ yield self.store.set_room_is_public(room_id, making_public)
@defer.inlineCallbacks
def edit_published_appservice_room_list(self, appservice_id, network_id,
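For the publish check, the handler now gathers every alias on the room
(including the canonical alias, which may not appear in the alias list) and
hands them to the config. A hypothetical sketch of how a rule-based
is_publishing_room_allowed could be structured; the real rule format lives in
the server config and is not part of this diff:

    def is_publishing_room_allowed(user_id, room_id, aliases, rules):
        # Hypothetical: first matching rule wins, default is allow.
        for rule in rules:
            if rule.matches(user_id, room_id, aliases):
                return rule.action == "allow"
        return True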
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index 173315af6c..02c508acec 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -20,7 +20,7 @@ from six import iteritems
from twisted.internet import defer
-from synapse.api.errors import HttpResponseException, SynapseError
+from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.types import get_domain_from_id
logger = logging.getLogger(__name__)
@@ -46,13 +46,19 @@ def _create_rerouter(func_name):
# when the remote end responds with things like 403 Not
# In Group, we can communicate that to the client instead
# of a 500.
- def h(failure):
+ def http_response_errback(failure):
failure.trap(HttpResponseException)
e = failure.value
if e.code == 403:
raise e.to_synapse_error()
return failure
- d.addErrback(h)
+
+ def request_failed_errback(failure):
+ failure.trap(RequestSendFailed)
+ raise SynapseError(502, "Failed to contact group server")
+
+ d.addErrback(http_response_errback)
+ d.addErrback(request_failed_errback)
return d
return f
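The two-errback split works because Failure.trap() re-raises the failure when
it is not one of the trapped types, so it flows on to the next errback in the
chain. A small runnable Twisted sketch of that behaviour:

    from twisted.internet import defer
    from twisted.python.failure import Failure

    class HttpError(Exception):
        pass

    class SendError(Exception):
        pass

    def http_errback(failure):
        failure.trap(HttpError)  # re-raises anything else
        return "handled http"

    def send_errback(failure):
        failure.trap(SendError)
        return "handled send"

    d = defer.fail(Failure(SendError("no route")))
    d.addErrback(http_errback)  # not an HttpError: failure propagates
    d.addErrback(send_errback)  # matches: failure becomes a result
    d.addCallback(print)        # prints "handled send"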
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 21c17c59a0..f92ab4d525 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -27,6 +27,8 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.http.client import CaptchaServerHttpClient
+from synapse.replication.http.login import RegisterDeviceReplicationServlet
+from synapse.replication.http.register import ReplicationRegisterServlet
from synapse.types import RoomAlias, RoomID, UserID, create_requester
from synapse.util.async_helpers import Linearizer
from synapse.util.threepids import check_3pid_allowed
@@ -61,6 +63,14 @@ class RegistrationHandler(BaseHandler):
)
self._server_notices_mxid = hs.config.server_notices_mxid
+ if hs.config.worker_app:
+ self._register_client = ReplicationRegisterServlet.make_client(hs)
+ self._register_device_client = (
+ RegisterDeviceReplicationServlet.make_client(hs)
+ )
+ else:
+ self.device_handler = hs.get_device_handler()
+
@defer.inlineCallbacks
def check_username(self, localpart, guest_access_token=None,
assigned_user_id=None):
@@ -155,7 +165,7 @@ class RegistrationHandler(BaseHandler):
yield self.auth.check_auth_blocking(threepid=threepid)
password_hash = None
if password:
- password_hash = yield self.auth_handler().hash(password)
+ password_hash = yield self._auth_handler.hash(password)
if localpart:
yield self.check_username(localpart, guest_access_token=guest_access_token)
@@ -185,7 +195,7 @@ class RegistrationHandler(BaseHandler):
token = None
if generate_token:
token = self.macaroon_gen.generate_access_token(user_id)
- yield self.store.register(
+ yield self._register_with_store(
user_id=user_id,
token=token,
password_hash=password_hash,
@@ -217,7 +227,7 @@ class RegistrationHandler(BaseHandler):
if default_display_name is None:
default_display_name = localpart
try:
- yield self.store.register(
+ yield self._register_with_store(
user_id=user_id,
token=token,
password_hash=password_hash,
@@ -316,7 +326,7 @@ class RegistrationHandler(BaseHandler):
user_id, allowed_appservice=service
)
- yield self.store.register(
+ yield self._register_with_store(
user_id=user_id,
password_hash="",
appservice_id=service_id,
@@ -494,7 +504,7 @@ class RegistrationHandler(BaseHandler):
token = self.macaroon_gen.generate_access_token(user_id)
if need_register:
- yield self.store.register(
+ yield self._register_with_store(
user_id=user_id,
token=token,
password_hash=password_hash,
@@ -512,9 +522,6 @@ class RegistrationHandler(BaseHandler):
defer.returnValue((user_id, token))
- def auth_handler(self):
- return self.hs.get_auth_handler()
-
@defer.inlineCallbacks
def get_or_register_3pid_guest(self, medium, address, inviter_user_id):
"""Get a guest access token for a 3PID, creating a guest account if
@@ -573,3 +580,94 @@ class RegistrationHandler(BaseHandler):
action="join",
ratelimit=False,
)
+
+ def _register_with_store(self, user_id, token=None, password_hash=None,
+ was_guest=False, make_guest=False, appservice_id=None,
+ create_profile_with_displayname=None, admin=False,
+ user_type=None):
+ """Register user in the datastore.
+
+ Args:
+ user_id (str): The desired user ID to register.
+ token (str|None): The desired access token to use for this user. If
+ this is not None, the given access token is associated with the
+ user ID.
+ password_hash (str|None): Optional. The password hash for this user.
+ was_guest (bool): Optional. Whether this is a guest account being
+ upgraded to a non-guest account.
+ make_guest (bool): True if the new user should be a guest,
+ False to add a regular user account.
+ appservice_id (str|None): The ID of the appservice registering the user.
+ create_profile_with_displayname (unicode|None): Optionally create a
+ profile for the user, setting their displayname to the given value.
+ admin (bool): Whether the user should be registered as an admin.
+ user_type (str|None): type of user. One of the values from
+ api.constants.UserTypes, or None for a normal user.
+
+ Returns:
+ Deferred
+ """
+ if self.hs.config.worker_app:
+ return self._register_client(
+ user_id=user_id,
+ token=token,
+ password_hash=password_hash,
+ was_guest=was_guest,
+ make_guest=make_guest,
+ appservice_id=appservice_id,
+ create_profile_with_displayname=create_profile_with_displayname,
+ admin=admin,
+ user_type=user_type,
+ )
+ else:
+ return self.store.register(
+ user_id=user_id,
+ token=token,
+ password_hash=password_hash,
+ was_guest=was_guest,
+ make_guest=make_guest,
+ appservice_id=appservice_id,
+ create_profile_with_displayname=create_profile_with_displayname,
+ admin=admin,
+ user_type=user_type,
+ )
+
+ @defer.inlineCallbacks
+ def register_device(self, user_id, device_id, initial_display_name,
+ is_guest=False):
+ """Register a device for a user and generate an access token.
+
+ Args:
+ user_id (str): full canonical @user:id
+ device_id (str|None): The device ID to check, or None to generate
+ a new one.
+ initial_display_name (str|None): An optional display name for the
+ device.
+ is_guest (bool): Whether this is a guest account
+
+ Returns:
+ defer.Deferred[tuple[str, str]]: Tuple of device ID and access token
+ """
+
+ if self.hs.config.worker_app:
+ r = yield self._register_device_client(
+ user_id=user_id,
+ device_id=device_id,
+ initial_display_name=initial_display_name,
+ is_guest=is_guest,
+ )
+ defer.returnValue((r["device_id"], r["access_token"]))
+ else:
+ device_id = yield self.device_handler.check_device_registered(
+ user_id, device_id, initial_display_name
+ )
+ if is_guest:
+ access_token = self.macaroon_gen.generate_access_token(
+ user_id, ["guest = true"]
+ )
+ else:
+ access_token = yield self._auth_handler.get_access_token_for_user_id(
+ user_id, device_id=device_id,
+ )
+
+ defer.returnValue((device_id, access_token))
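Both new methods follow the same dispatch rule: on a worker, forward the write
to the master over the replication HTTP API; on the master, hit the handler or
store directly. A stripped-down sketch of the pattern (a simplification, not
the full Synapse wiring):

    class RegistrationWriter(object):
        """Sketch of the worker/master split in _register_with_store."""

        def __init__(self, hs):
            if hs.config.worker_app:
                # Worker process: registration writes must happen on the
                # master, so build an HTTP replication client for them.
                self._do_register = ReplicationRegisterServlet.make_client(hs)
            else:
                # Master process: write to the database directly.
                self._do_register = hs.get_datastore().register

        def register(self, **kwargs):
            return self._do_register(**kwargs)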
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 5e40e9ea46..f9af1f0046 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -284,6 +284,7 @@ class RoomCreationHandler(BaseHandler):
(EventTypes.GuestAccess, ""),
(EventTypes.RoomAvatar, ""),
(EventTypes.Encryption, ""),
+ (EventTypes.ServerACL, ""),
)
old_room_state_ids = yield self.store.get_filtered_current_state_ids(
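Adding (EventTypes.ServerACL, "") to this tuple means server ACLs are now
carried over when a room is upgraded, along with the other listed state
events. In spirit, the copy step filters the old room's current state down to
those (event_type, state_key) pairs; a generic sketch:

    # Generic sketch: keep only the listed (event_type, state_key) pairs
    # when seeding the new room's state from the old one.
    def filter_state(current_state_ids, types_to_copy):
        return {
            key: event_id
            for key, event_id in current_state_ids.items()
            if key in types_to_copy
        }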
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 120815b09b..283c6c1b81 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -130,7 +130,7 @@ class UserDirectoryHandler(object):
# Support users are for diagnostics and should not appear in the user directory.
if not is_support:
yield self.store.update_profile_in_user_dir(
- user_id, profile.display_name, profile.avatar_url, None,
+ user_id, profile.display_name, profile.avatar_url, None
)
@defer.inlineCallbacks
@@ -166,8 +166,9 @@ class UserDirectoryHandler(object):
self.pos = deltas[-1]["stream_id"]
# Expose current event processing position to prometheus
- synapse.metrics.event_processing_positions.labels(
- "user_dir").set(self.pos)
+ synapse.metrics.event_processing_positions.labels("user_dir").set(
+ self.pos
+ )
yield self.store.update_user_directory_stream_pos(self.pos)
@@ -191,21 +192,25 @@ class UserDirectoryHandler(object):
logger.info("Handling room %d/%d", num_processed_rooms + 1, len(room_ids))
yield self._handle_initial_room(room_id)
num_processed_rooms += 1
- yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+ yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
logger.info("Processed all rooms.")
if self.search_all_users:
num_processed_users = 0
user_ids = yield self.store.get_all_local_users()
- logger.info("Doing initial update of user directory. %d users", len(user_ids))
+ logger.info(
+ "Doing initial update of user directory. %d users", len(user_ids)
+ )
for user_id in user_ids:
# We add profiles for all users even if they don't match the
# include pattern, just in case we want to change it in future
- logger.info("Handling user %d/%d", num_processed_users + 1, len(user_ids))
+ logger.info(
+ "Handling user %d/%d", num_processed_users + 1, len(user_ids)
+ )
yield self._handle_local_user(user_id)
num_processed_users += 1
- yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.)
+ yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.0)
logger.info("Processed all users")
@@ -224,24 +229,24 @@ class UserDirectoryHandler(object):
if not is_in_room:
return
- is_public = yield self.store.is_room_world_readable_or_publicly_joinable(room_id)
+ is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
+ room_id
+ )
users_with_profile = yield self.state.get_current_user_in_room(room_id)
user_ids = set(users_with_profile)
unhandled_users = user_ids - self.initially_handled_users
yield self.store.add_profiles_to_user_dir(
- room_id, {
- user_id: users_with_profile[user_id] for user_id in unhandled_users
- }
+ room_id,
+ {user_id: users_with_profile[user_id] for user_id in unhandled_users},
)
self.initially_handled_users |= unhandled_users
if is_public:
yield self.store.add_users_to_public_room(
- room_id,
- user_ids=user_ids - self.initially_handled_users_in_public
+ room_id, user_ids=user_ids - self.initially_handled_users_in_public
)
self.initially_handled_users_in_public |= user_ids
@@ -253,7 +258,7 @@ class UserDirectoryHandler(object):
count = 0
for user_id in user_ids:
if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
- yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+ yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
if not self.is_mine_id(user_id):
count += 1
@@ -268,7 +273,7 @@ class UserDirectoryHandler(object):
continue
if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
- yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+ yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
count += 1
user_set = (user_id, other_user_id)
@@ -290,25 +295,23 @@ class UserDirectoryHandler(object):
if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
yield self.store.add_users_who_share_room(
- room_id, not is_public, to_insert,
+ room_id, not is_public, to_insert
)
to_insert.clear()
if len(to_update) > self.INITIAL_ROOM_BATCH_SIZE:
yield self.store.update_users_who_share_room(
- room_id, not is_public, to_update,
+ room_id, not is_public, to_update
)
to_update.clear()
if to_insert:
- yield self.store.add_users_who_share_room(
- room_id, not is_public, to_insert,
- )
+ yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
to_insert.clear()
if to_update:
yield self.store.update_users_who_share_room(
- room_id, not is_public, to_update,
+ room_id, not is_public, to_update
)
to_update.clear()
@@ -329,11 +332,12 @@ class UserDirectoryHandler(object):
# may have become public or not and add/remove the users in said room
if typ in (EventTypes.RoomHistoryVisibility, EventTypes.JoinRules):
yield self._handle_room_publicity_change(
- room_id, prev_event_id, event_id, typ,
+ room_id, prev_event_id, event_id, typ
)
elif typ == EventTypes.Member:
change = yield self._get_key_change(
- prev_event_id, event_id,
+ prev_event_id,
+ event_id,
key_name="membership",
public_value=Membership.JOIN,
)
@@ -342,14 +346,16 @@ class UserDirectoryHandler(object):
# Need to check if the server left the room entirely, if so
# we might need to remove all the users in that room
is_in_room = yield self.store.is_host_joined(
- room_id, self.server_name,
+ room_id, self.server_name
)
if not is_in_room:
logger.info("Server left room: %r", room_id)
# Fetch all the users that we marked as being in user
# directory due to being in the room and then check if
# need to remove those users or not
- user_ids = yield self.store.get_users_in_dir_due_to_room(room_id)
+ user_ids = yield self.store.get_users_in_dir_due_to_room(
+ room_id
+ )
for user_id in user_ids:
yield self._handle_remove_user(room_id, user_id)
return
@@ -361,7 +367,7 @@ class UserDirectoryHandler(object):
if change is None:
# Handle any profile changes
yield self._handle_profile_change(
- state_key, room_id, prev_event_id, event_id,
+ state_key, room_id, prev_event_id, event_id
)
continue
@@ -393,13 +399,15 @@ class UserDirectoryHandler(object):
if typ == EventTypes.RoomHistoryVisibility:
change = yield self._get_key_change(
- prev_event_id, event_id,
+ prev_event_id,
+ event_id,
key_name="history_visibility",
public_value="world_readable",
)
elif typ == EventTypes.JoinRules:
change = yield self._get_key_change(
- prev_event_id, event_id,
+ prev_event_id,
+ event_id,
key_name="join_rule",
public_value=JoinRules.PUBLIC,
)
@@ -524,7 +532,7 @@ class UserDirectoryHandler(object):
)
if self.is_mine_id(other_user_id) and not is_appservice:
shared_is_private = yield self.store.get_if_users_share_a_room(
- other_user_id, user_id,
+ other_user_id, user_id
)
if shared_is_private is True:
# We've already marked in the database they share a private room
@@ -539,13 +547,11 @@ class UserDirectoryHandler(object):
to_insert.add((other_user_id, user_id))
if to_insert:
- yield self.store.add_users_who_share_room(
- room_id, not is_public, to_insert,
- )
+ yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
if to_update:
yield self.store.update_users_who_share_room(
- room_id, not is_public, to_update,
+ room_id, not is_public, to_update
)
@defer.inlineCallbacks
@@ -564,15 +570,15 @@ class UserDirectoryHandler(object):
row = yield self.store.get_user_in_public_room(user_id)
update_user_in_public = row and row["room_id"] == room_id
- if (update_user_in_public or update_user_dir):
+ if update_user_in_public or update_user_dir:
# XXX: Make this faster?
rooms = yield self.store.get_rooms_for_user(user_id)
for j_room_id in rooms:
- if (not update_user_in_public and not update_user_dir):
+ if not update_user_in_public and not update_user_dir:
break
is_in_room = yield self.store.is_host_joined(
- j_room_id, self.server_name,
+ j_room_id, self.server_name
)
if not is_in_room:
@@ -600,19 +606,19 @@ class UserDirectoryHandler(object):
# Get a list of user tuples that were in the DB due to this room and
# users (this includes tuples where the other user matches `user_id`)
user_tuples = yield self.store.get_users_in_share_dir_with_room_id(
- user_id, room_id,
+ user_id, room_id
)
for user_id, other_user_id in user_tuples:
# For each user tuple get a list of rooms that they still share,
# trying to find a private room, and update the entry in the DB
- rooms = yield self.store.get_rooms_in_common_for_users(user_id, other_user_id)
+ rooms = yield self.store.get_rooms_in_common_for_users(
+ user_id, other_user_id
+ )
# If they dont share a room anymore, remove the mapping
if not rooms:
- yield self.store.remove_user_who_share_room(
- user_id, other_user_id,
- )
+ yield self.store.remove_user_who_share_room(user_id, other_user_id)
continue
found_public_share = None
@@ -626,13 +632,13 @@ class UserDirectoryHandler(object):
else:
found_public_share = None
yield self.store.update_users_who_share_room(
- room_id, not is_public, [(user_id, other_user_id)],
+ room_id, not is_public, [(user_id, other_user_id)]
)
break
if found_public_share:
yield self.store.update_users_who_share_room(
- room_id, not is_public, [(user_id, other_user_id)],
+ room_id, not is_public, [(user_id, other_user_id)]
)
@defer.inlineCallbacks
@@ -660,7 +666,7 @@ class UserDirectoryHandler(object):
if prev_name != new_name or prev_avatar != new_avatar:
yield self.store.update_profile_in_user_dir(
- user_id, new_name, new_avatar, room_id,
+ user_id, new_name, new_avatar, room_id
)
@defer.inlineCallbacks
|