diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 329e3c7d71..a958c45271 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -59,6 +59,7 @@ class AuthHandler(BaseHandler):
LoginType.EMAIL_IDENTITY: self._check_email_identity,
LoginType.MSISDN: self._check_msisdn,
LoginType.DUMMY: self._check_dummy_auth,
+ LoginType.TERMS: self._check_terms_auth,
}
self.bcrypt_rounds = hs.config.bcrypt_rounds
@@ -431,6 +432,9 @@ class AuthHandler(BaseHandler):
def _check_dummy_auth(self, authdict, _):
return defer.succeed(True)
+ def _check_terms_auth(self, authdict, _):
+ return defer.succeed(True)
+
@defer.inlineCallbacks
def _check_threepid(self, medium, authdict):
if 'threepid_creds' not in authdict:
@@ -462,6 +466,22 @@ class AuthHandler(BaseHandler):
def _get_params_recaptcha(self):
return {"public_key": self.hs.config.recaptcha_public_key}
+ def _get_params_terms(self):
+ return {
+ "policies": {
+ "privacy_policy": {
+ "version": self.hs.config.user_consent_version,
+ "en": {
+ "name": self.hs.config.user_consent_policy_name,
+ "url": "%s/_matrix/consent?v=%s" % (
+ self.hs.config.public_baseurl,
+ self.hs.config.user_consent_version,
+ ),
+ },
+ },
+ },
+ }
+
def _auth_dict_for_flows(self, flows, session):
public_flows = []
for f in flows:
@@ -469,6 +489,7 @@ class AuthHandler(BaseHandler):
get_params = {
LoginType.RECAPTCHA: self._get_params_recaptcha,
+ LoginType.TERMS: self._get_params_terms,
}
params = {}
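For context on the new `m.login.terms` stage: the dict built by `_get_params_terms` ends up in the `params` section of the user-interactive-auth response, keyed by the login type. Below is a minimal sketch of how a client might read it; the response values and the `policy_urls` helper are illustrative and not part of this change.

```python
# Sketch only: reading the m.login.terms params out of a 401 UI-auth response.
# The shape mirrors _get_params_terms above; the helper is hypothetical.

ui_auth_response = {
    "session": "abcdef",
    "flows": [{"stages": ["m.login.terms"]}],
    "params": {
        "m.login.terms": {
            "policies": {
                "privacy_policy": {
                    "version": "1.0",
                    "en": {
                        "name": "Privacy Policy",
                        "url": "https://example.com/_matrix/consent?v=1.0",
                    },
                },
            },
        },
    },
}


def policy_urls(params, lang="en"):
    """Collect the (name, url) pairs the user must be shown for m.login.terms."""
    urls = []
    for policy in params["policies"].values():
        # fall back to any available translation if the requested one is absent
        translation = policy.get(lang) or next(
            v for k, v in policy.items() if k != "version"
        )
        urls.append((translation["name"], translation["url"]))
    return urls


print(policy_urls(ui_auth_response["params"]["m.login.terms"]))
# [('Privacy Policy', 'https://example.com/_matrix/consent?v=1.0')]
```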
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 7d67bf803a..0699731c13 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -138,9 +138,30 @@ class DirectoryHandler(BaseHandler):
)
@defer.inlineCallbacks
- def delete_association(self, requester, room_alias):
- # association deletion for human users
+ def delete_association(self, requester, room_alias, send_event=True):
+ """Remove an alias from the directory
+ (this is only meant for human users; AS users should call
+ delete_appservice_association)
+
+ Args:
+ requester (Requester):
+ room_alias (RoomAlias):
+ send_event (bool): Whether to send an updated m.room.aliases event.
+ Note that, if we delete the canonical alias, we will always attempt
+ to send an m.room.canonical_alias event
+
+ Returns:
+ Deferred[unicode]: room id that the alias used to point to
+
+ Raises:
+ NotFoundError: if the alias doesn't exist
+
+ AuthError: if the user doesn't have perms to delete the alias (i.e., the user
+ is neither the creator of the alias nor a server admin).
+
+ SynapseError: if the alias belongs to an AS
+ """
user_id = requester.user.to_string()
try:
@@ -168,10 +189,11 @@ class DirectoryHandler(BaseHandler):
room_id = yield self._delete_association(room_alias)
try:
- yield self.send_room_alias_update_event(
- requester,
- room_id
- )
+ if send_event:
+ yield self.send_room_alias_update_event(
+ requester,
+ room_id
+ )
yield self._update_canonical_alias(
requester,
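The new `send_event` flag lets a caller delete several aliases without emitting an `m.room.aliases` event per deletion, sending a single update at the end (this is how the room-upgrade code later in this diff uses it). A hedged sketch of that pattern, assuming it runs somewhere with a store, a directory handler and a `Requester` in scope:

```python
# Sketch only: batch alias removal with send_event=False, followed by one
# m.room.aliases update. The remove_all_aliases function is illustrative.

import logging

from twisted.internet import defer

from synapse.api.errors import SynapseError
from synapse.types import RoomAlias

logger = logging.getLogger(__name__)


@defer.inlineCallbacks
def remove_all_aliases(directory_handler, store, requester, room_id):
    aliases = yield store.get_aliases_for_room(room_id)

    removed = []
    for alias_str in aliases:
        try:
            # suppress the per-alias m.room.aliases event
            yield directory_handler.delete_association(
                requester, RoomAlias.from_string(alias_str), send_event=False,
            )
            removed.append(alias_str)
        except SynapseError as e:
            logger.warning("Could not remove alias %s: %s", alias_str, e)

    if removed:
        # one aliases event covering everything we removed
        yield directory_handler.send_room_alias_update_event(requester, room_id)

    defer.returnValue(removed)
```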
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index 5edb3cfe04..42b040375f 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -19,7 +19,7 @@ from six import iteritems
from twisted.internet import defer
-from synapse.api.errors import RoomKeysVersionError, StoreError, SynapseError
+from synapse.api.errors import NotFoundError, RoomKeysVersionError, StoreError
from synapse.util.async_helpers import Linearizer
logger = logging.getLogger(__name__)
@@ -55,6 +55,8 @@ class E2eRoomKeysHandler(object):
room_id(string): room ID to get keys for, for None to get keys for all rooms
session_id(string): session ID to get keys for, for None to get keys for all
sessions
+ Raises:
+ NotFoundError: if the backup version does not exist
Returns:
A deferred list of dicts giving the session_data and message metadata for
these room keys.
@@ -63,13 +65,19 @@ class E2eRoomKeysHandler(object):
# we deliberately take the lock to get keys so that changing the version
# works atomically
with (yield self._upload_linearizer.queue(user_id)):
+ # make sure the backup version exists
+ try:
+ yield self.store.get_e2e_room_keys_version_info(user_id, version)
+ except StoreError as e:
+ if e.code == 404:
+ raise NotFoundError("Unknown backup version")
+ else:
+ raise
+
results = yield self.store.get_e2e_room_keys(
user_id, version, room_id, session_id
)
- if results['rooms'] == {}:
- raise SynapseError(404, "No room_keys found")
-
defer.returnValue(results)
@defer.inlineCallbacks
@@ -120,7 +128,7 @@ class E2eRoomKeysHandler(object):
}
Raises:
- SynapseError: with code 404 if there are no versions defined
+ NotFoundError: if there are no versions defined
RoomKeysVersionError: if the uploaded version is not the current version
"""
@@ -134,7 +142,7 @@ class E2eRoomKeysHandler(object):
version_info = yield self.store.get_e2e_room_keys_version_info(user_id)
except StoreError as e:
if e.code == 404:
- raise SynapseError(404, "Version '%s' not found" % (version,))
+ raise NotFoundError("Version '%s' not found" % (version,))
else:
raise
@@ -148,7 +156,7 @@ class E2eRoomKeysHandler(object):
raise RoomKeysVersionError(current_version=version_info['version'])
except StoreError as e:
if e.code == 404:
- raise SynapseError(404, "Version '%s' not found" % (version,))
+ raise NotFoundError("Version '%s' not found" % (version,))
else:
raise
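The handler now validates the backup version up front, translating the store's 404 `StoreError` into a `NotFoundError`, and an empty key set is no longer treated as an error. A small sketch of that pattern in isolation; the wrapper function is illustrative, while the store methods are the ones used above:

```python
# Sketch of the "check the version exists, then fetch" pattern.

from twisted.internet import defer

from synapse.api.errors import NotFoundError, StoreError


@defer.inlineCallbacks
def get_backup_keys(store, user_id, version, room_id=None, session_id=None):
    try:
        # raises StoreError(404) if the backup version is unknown
        yield store.get_e2e_room_keys_version_info(user_id, version)
    except StoreError as e:
        if e.code == 404:
            raise NotFoundError("Unknown backup version")
        raise

    # an empty result is no longer an error: the caller simply gets
    # back {"rooms": {}}
    results = yield store.get_e2e_room_keys(user_id, version, room_id, session_id)
    defer.returnValue(results)
```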
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index cd5b9bbb19..a3bb864bb2 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -202,27 +202,22 @@ class FederationHandler(BaseHandler):
self.room_queues[room_id].append((pdu, origin))
return
- # If we're no longer in the room just ditch the event entirely. This
- # is probably an old server that has come back and thinks we're still
- # in the room (or we've been rejoined to the room by a state reset).
+ # If we're not in the room just ditch the event entirely. This is
+ # probably an old server that has come back and thinks we're still in
+ # the room (or we've been rejoined to the room by a state reset).
#
- # If we were never in the room then maybe our database got vaped and
- # we should check if we *are* in fact in the room. If we are then we
- # can magically rejoin the room.
+ # Note that if we were never in the room then we would have already
+ # dropped the event, since we wouldn't know the room version.
is_in_room = yield self.auth.check_host_in_room(
room_id,
self.server_name
)
if not is_in_room:
- was_in_room = yield self.store.was_host_joined(
- pdu.room_id, self.server_name,
+ logger.info(
+ "[%s %s] Ignoring PDU from %s as we're not in the room",
+ room_id, event_id, origin,
)
- if was_in_room:
- logger.info(
- "[%s %s] Ignoring PDU from %s as we've left the room",
- room_id, event_id, origin,
- )
- defer.returnValue(None)
+ defer.returnValue(None)
state = None
auth_chain = []
@@ -239,7 +234,7 @@ class FederationHandler(BaseHandler):
room_id, event_id, min_depth,
)
- prevs = {e_id for e_id, _ in pdu.prev_events}
+ prevs = set(pdu.prev_event_ids())
seen = yield self.store.have_seen_events(prevs)
if min_depth and pdu.depth < min_depth:
@@ -557,86 +552,54 @@ class FederationHandler(BaseHandler):
room_id, event_id, event,
)
- # FIXME (erikj): Awful hack to make the case where we are not currently
- # in the room work
- # If state and auth_chain are None, then we don't need to do this check
- # as we already know we have enough state in the DB to handle this
- # event.
- if state and auth_chain and not event.internal_metadata.is_outlier():
- is_in_room = yield self.auth.check_host_in_room(
- room_id,
- self.server_name
- )
- else:
- is_in_room = True
-
- if not is_in_room:
- logger.info(
- "[%s %s] Got event for room we're not in",
- room_id, event_id,
- )
-
- try:
- yield self._persist_auth_tree(
- origin, auth_chain, state, event
- )
- except AuthError as e:
- raise FederationError(
- "ERROR",
- e.code,
- e.msg,
- affected=event_id,
- )
-
- else:
- event_ids = set()
- if state:
- event_ids |= {e.event_id for e in state}
- if auth_chain:
- event_ids |= {e.event_id for e in auth_chain}
+ event_ids = set()
+ if state:
+ event_ids |= {e.event_id for e in state}
+ if auth_chain:
+ event_ids |= {e.event_id for e in auth_chain}
- seen_ids = yield self.store.have_seen_events(event_ids)
+ seen_ids = yield self.store.have_seen_events(event_ids)
- if state and auth_chain is not None:
- # If we have any state or auth_chain given to us by the replication
- # layer, then we should handle them (if we haven't before.)
+ if state and auth_chain is not None:
+ # If we have any state or auth_chain given to us by the replication
+ # layer, then we should handle them (if we haven't before.)
- event_infos = []
+ event_infos = []
- for e in itertools.chain(auth_chain, state):
- if e.event_id in seen_ids:
- continue
- e.internal_metadata.outlier = True
- auth_ids = [e_id for e_id, _ in e.auth_events]
- auth = {
- (e.type, e.state_key): e for e in auth_chain
- if e.event_id in auth_ids or e.type == EventTypes.Create
- }
- event_infos.append({
- "event": e,
- "auth_events": auth,
- })
- seen_ids.add(e.event_id)
+ for e in itertools.chain(auth_chain, state):
+ if e.event_id in seen_ids:
+ continue
+ e.internal_metadata.outlier = True
+ auth_ids = e.auth_event_ids()
+ auth = {
+ (e.type, e.state_key): e for e in auth_chain
+ if e.event_id in auth_ids or e.type == EventTypes.Create
+ }
+ event_infos.append({
+ "event": e,
+ "auth_events": auth,
+ })
+ seen_ids.add(e.event_id)
- logger.info(
- "[%s %s] persisting newly-received auth/state events %s",
- room_id, event_id, [e["event"].event_id for e in event_infos]
- )
- yield self._handle_new_events(origin, event_infos)
+ logger.info(
+ "[%s %s] persisting newly-received auth/state events %s",
+ room_id, event_id, [e["event"].event_id for e in event_infos]
+ )
+ yield self._handle_new_events(origin, event_infos)
- try:
- context = yield self._handle_new_event(
- origin,
- event,
- state=state,
- )
- except AuthError as e:
- raise FederationError(
- "ERROR",
- e.code,
- e.msg,
- affected=event.event_id,
- )
+ try:
+ context = yield self._handle_new_event(
+ origin,
+ event,
+ state=state,
+ )
+ except AuthError as e:
+ raise FederationError(
+ "ERROR",
+ e.code,
+ e.msg,
+ affected=event.event_id,
+ )
room = yield self.store.get_room(room_id)
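The de-indented block above builds, for each previously unseen auth/state event, a map of its auth events keyed by `(type, state_key)`, using the new `auth_event_ids()` accessor. A self-contained sketch with a stubbed event class shows the shape of that construction; the stub is illustrative only:

```python
# Illustrative sketch of the auth-map construction: pick out of the auth chain
# the events each event cites in auth_events, keyed by (type, state_key).
# Create events are always included.

class StubEvent(object):
    def __init__(self, event_id, etype, state_key, auth_ids):
        self.event_id = event_id
        self.type = etype
        self.state_key = state_key
        self._auth_ids = auth_ids

    def auth_event_ids(self):
        return self._auth_ids


EVENT_TYPE_CREATE = "m.room.create"

create = StubEvent("$create", EVENT_TYPE_CREATE, "", [])
power = StubEvent("$power", "m.room.power_levels", "", ["$create"])
member = StubEvent(
    "$member", "m.room.member", "@alice:example.com", ["$create", "$power"],
)

auth_chain = [create, power, member]


def auth_map_for(event, auth_chain):
    auth_ids = event.auth_event_ids()
    return {
        (e.type, e.state_key): e
        for e in auth_chain
        if e.event_id in auth_ids or e.type == EVENT_TYPE_CREATE
    }


print(sorted(auth_map_for(member, auth_chain)))
# [('m.room.create', ''), ('m.room.power_levels', '')]
```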
@@ -726,7 +689,7 @@ class FederationHandler(BaseHandler):
edges = [
ev.event_id
for ev in events
- if set(e_id for e_id, _ in ev.prev_events) - event_ids
+ if set(ev.prev_event_ids()) - event_ids
]
logger.info(
@@ -753,7 +716,7 @@ class FederationHandler(BaseHandler):
required_auth = set(
a_id
for event in events + list(state_events.values()) + list(auth_events.values())
- for a_id, _ in event.auth_events
+ for a_id in event.auth_event_ids()
)
auth_events.update({
e_id: event_map[e_id] for e_id in required_auth if e_id in event_map
@@ -769,7 +732,7 @@ class FederationHandler(BaseHandler):
auth_events.update(ret_events)
required_auth.update(
- a_id for event in ret_events.values() for a_id, _ in event.auth_events
+ a_id for event in ret_events.values() for a_id in event.auth_event_ids()
)
missing_auth = required_auth - set(auth_events)
@@ -796,7 +759,7 @@ class FederationHandler(BaseHandler):
required_auth.update(
a_id
for event in results if event
- for a_id, _ in event.auth_events
+ for a_id in event.auth_event_ids()
)
missing_auth = required_auth - set(auth_events)
@@ -816,7 +779,7 @@ class FederationHandler(BaseHandler):
"auth_events": {
(auth_events[a_id].type, auth_events[a_id].state_key):
auth_events[a_id]
- for a_id, _ in a.auth_events
+ for a_id in a.auth_event_ids()
if a_id in auth_events
}
})
@@ -828,7 +791,7 @@ class FederationHandler(BaseHandler):
"auth_events": {
(auth_events[a_id].type, auth_events[a_id].state_key):
auth_events[a_id]
- for a_id, _ in event_map[e_id].auth_events
+ for a_id in event_map[e_id].auth_event_ids()
if a_id in auth_events
}
})
@@ -1041,17 +1004,17 @@ class FederationHandler(BaseHandler):
Raises:
SynapseError if the event does not pass muster
"""
- if len(ev.prev_events) > 20:
+ if len(ev.prev_event_ids()) > 20:
logger.warn("Rejecting event %s which has %i prev_events",
- ev.event_id, len(ev.prev_events))
+ ev.event_id, len(ev.prev_event_ids()))
raise SynapseError(
http_client.BAD_REQUEST,
"Too many prev_events",
)
- if len(ev.auth_events) > 10:
+ if len(ev.auth_event_ids()) > 10:
logger.warn("Rejecting event %s which has %i auth_events",
- ev.event_id, len(ev.auth_events))
+ ev.event_id, len(ev.auth_event_ids()))
raise SynapseError(
http_client.BAD_REQUEST,
"Too many auth_events",
@@ -1076,7 +1039,7 @@ class FederationHandler(BaseHandler):
def on_event_auth(self, event_id):
event = yield self.store.get_event(event_id)
auth = yield self.store.get_auth_chain(
- [auth_id for auth_id, _ in event.auth_events],
+ [auth_id for auth_id in event.auth_event_ids()],
include_given=True
)
defer.returnValue([e for e in auth])
@@ -1698,7 +1661,7 @@ class FederationHandler(BaseHandler):
missing_auth_events = set()
for e in itertools.chain(auth_events, state, [event]):
- for e_id, _ in e.auth_events:
+ for e_id in e.auth_event_ids():
if e_id not in event_map:
missing_auth_events.add(e_id)
@@ -1717,7 +1680,7 @@ class FederationHandler(BaseHandler):
for e in itertools.chain(auth_events, state, [event]):
auth_for_e = {
(event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
- for e_id, _ in e.auth_events
+ for e_id in e.auth_event_ids()
if e_id in event_map
}
if create_event:
@@ -1785,10 +1748,10 @@ class FederationHandler(BaseHandler):
# This is a hack to fix some old rooms where the initial join event
# didn't reference the create event in its auth events.
- if event.type == EventTypes.Member and not event.auth_events:
- if len(event.prev_events) == 1 and event.depth < 5:
+ if event.type == EventTypes.Member and not event.auth_event_ids():
+ if len(event.prev_event_ids()) == 1 and event.depth < 5:
c = yield self.store.get_event(
- event.prev_events[0][0],
+ event.prev_event_ids()[0],
allow_none=True,
)
if c and c.type == EventTypes.Create:
@@ -1835,7 +1798,7 @@ class FederationHandler(BaseHandler):
# Now get the current auth_chain for the event.
local_auth_chain = yield self.store.get_auth_chain(
- [auth_id for auth_id, _ in event.auth_events],
+ [auth_id for auth_id in event.auth_event_ids()],
include_given=True
)
@@ -1891,7 +1854,7 @@ class FederationHandler(BaseHandler):
"""
# Check if we have all the auth events.
current_state = set(e.event_id for e in auth_events.values())
- event_auth_events = set(e_id for e_id, _ in event.auth_events)
+ event_auth_events = set(event.auth_event_ids())
if event.is_state():
event_key = (event.type, event.state_key)
@@ -1935,7 +1898,7 @@ class FederationHandler(BaseHandler):
continue
try:
- auth_ids = [e_id for e_id, _ in e.auth_events]
+ auth_ids = e.auth_event_ids()
auth = {
(e.type, e.state_key): e for e in remote_auth_chain
if e.event_id in auth_ids or e.type == EventTypes.Create
@@ -1956,7 +1919,7 @@ class FederationHandler(BaseHandler):
pass
have_events = yield self.store.get_seen_events_with_rejections(
- [e_id for e_id, _ in event.auth_events]
+ event.auth_event_ids()
)
seen_events = set(have_events.keys())
except Exception:
@@ -2058,7 +2021,7 @@ class FederationHandler(BaseHandler):
continue
try:
- auth_ids = [e_id for e_id, _ in ev.auth_events]
+ auth_ids = ev.auth_event_ids()
auth = {
(e.type, e.state_key): e
for e in result["auth_chain"]
@@ -2250,7 +2213,7 @@ class FederationHandler(BaseHandler):
missing_remote_ids = [e.event_id for e in missing_remotes]
base_remote_rejected = list(missing_remotes)
for e in missing_remotes:
- for e_id, _ in e.auth_events:
+ for e_id in e.auth_event_ids():
if e_id in missing_remote_ids:
try:
base_remote_rejected.remove(e)
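The recurring change in this file replaces direct iteration over `event.prev_events` / `event.auth_events` (lists of `(event_id, hash)` pairs) with the `prev_event_ids()` / `auth_event_ids()` accessors. A rough sketch of accessors of that shape; the real methods live on synapse's event classes and may differ in detail:

```python
# Rough sketch showing why `for e_id, _ in event.auth_events` can become
# `for e_id in event.auth_event_ids()`.

class SketchEvent(object):
    def __init__(self, prev_events, auth_events):
        # both stored as lists of (event_id, hashes) pairs
        self.prev_events = prev_events
        self.auth_events = auth_events

    def prev_event_ids(self):
        """Return just the event IDs of this event's prev_events."""
        return [e_id for e_id, _ in self.prev_events]

    def auth_event_ids(self):
        """Return just the event IDs of this event's auth_events."""
        return [e_id for e_id, _ in self.auth_events]


ev = SketchEvent(
    prev_events=[("$prev1", {"sha256": "..."}), ("$prev2", {"sha256": "..."})],
    auth_events=[("$create", {"sha256": "..."})],
)

assert ev.prev_event_ids() == ["$prev1", "$prev2"]
assert ev.auth_event_ids() == ["$create"]
```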
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 969e588e73..a7cd779b02 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -427,6 +427,9 @@ class EventCreationHandler(object):
if event.is_state():
prev_state = yield self.deduplicate_state_event(event, context)
if prev_state is not None:
+ logger.info(
+ "Not bothering to persist duplicate state event %s", event.event_id,
+ )
defer.returnValue(prev_state)
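For reference, the short-circuit being logged here returns the prior event when an identical state event already exists rather than persisting a duplicate. A hedged stand-in for that check (the real `deduplicate_state_event` compares against current room state; this toy version only compares type, state key and content):

```python
# Toy dedupe check, illustrative only.

def deduplicate_state_event(event, current_state_events):
    """Return a matching prior event, or None if this one is new."""
    for prior in current_state_events:
        if (
            prior["type"] == event["type"]
            and prior["state_key"] == event["state_key"]
            and prior["content"] == event["content"]
        ):
            return prior
    return None


existing = [{"type": "m.room.name", "state_key": "", "content": {"name": "Lobby"}}]
new_event = {"type": "m.room.name", "state_key": "", "content": {"name": "Lobby"}}

prev = deduplicate_state_event(new_event, existing)
if prev is not None:
    print("Not bothering to persist duplicate state event")
```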
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index e9d7b25a36..d2beb275cf 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -50,7 +50,6 @@ class RegistrationHandler(BaseHandler):
self._auth_handler = hs.get_auth_handler()
self.profile_handler = hs.get_profile_handler()
self.user_directory_handler = hs.get_user_directory_handler()
- self.room_creation_handler = self.hs.get_room_creation_handler()
self.captcha_client = CaptchaServerHttpClient(hs)
self._next_generated_user_id = None
@@ -241,7 +240,10 @@ class RegistrationHandler(BaseHandler):
else:
# create room expects the localpart of the room alias
room_alias_localpart = room_alias.localpart
- yield self.room_creation_handler.create_room(
+
+ # getting the RoomCreationHandler during init gives a dependency
+ # loop
+ yield self.hs.get_room_creation_handler().create_room(
fake_requester,
config={
"preset": "public_chat",
@@ -254,9 +256,6 @@ class RegistrationHandler(BaseHandler):
except Exception as e:
logger.error("Failed to join new user to %r: %r", r, e)
- # We used to generate default identicons here, but nowadays
- # we want clients to generate their own as part of their branding
- # rather than there being consistent matrix-wide ones, so we don't.
defer.returnValue((user_id, token))
@defer.inlineCallbacks
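The lazy lookup avoids a construction-time cycle: if the registration handler fetched the room-creation handler in `__init__` while the room-creation handler (directly or indirectly) fetched the registration handler, homeserver setup would recurse. A minimal illustration, not synapse code:

```python
# Minimal illustration of breaking an __init__-time dependency loop by
# deferring one handler lookup to call time. All class names are hypothetical.

class HomeServerish(object):
    def __init__(self):
        self._registration = None
        self._room_creation = None

    def get_registration_handler(self):
        if self._registration is None:
            self._registration = RegistrationHandlerish(self)
        return self._registration

    def get_room_creation_handler(self):
        if self._room_creation is None:
            self._room_creation = RoomCreationHandlerish(self)
        return self._room_creation


class RoomCreationHandlerish(object):
    def __init__(self, hs):
        # eager lookup: would recurse if RegistrationHandlerish did the same
        self.registration = hs.get_registration_handler()


class RegistrationHandlerish(object):
    def __init__(self, hs):
        self.hs = hs  # no eager lookup here

    def create_default_room(self):
        # lazy lookup at call time avoids the __init__ cycle
        return self.hs.get_room_creation_handler()


hs = HomeServerish()
handler = hs.get_registration_handler()
assert handler.create_default_room() is hs.get_room_creation_handler()
```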
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 3ba92bdb4c..3928faa6e7 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -21,7 +21,7 @@ import math
import string
from collections import OrderedDict
-from six import string_types
+from six import iteritems, string_types
from twisted.internet import defer
@@ -32,10 +32,11 @@ from synapse.api.constants import (
JoinRules,
RoomCreationPreset,
)
-from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
+from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
from synapse.util import stringutils
+from synapse.util.async_helpers import Linearizer
from synapse.visibility import filter_events_for_client
from ._base import BaseHandler
@@ -73,6 +74,334 @@ class RoomCreationHandler(BaseHandler):
self.spam_checker = hs.get_spam_checker()
self.event_creation_handler = hs.get_event_creation_handler()
+ self.room_member_handler = hs.get_room_member_handler()
+
+ # linearizer to stop two upgrades happening at once
+ self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
+
+ @defer.inlineCallbacks
+ def upgrade_room(self, requester, old_room_id, new_version):
+ """Replace a room with a new room with a different version
+
+ Args:
+ requester (synapse.types.Requester): the user requesting the upgrade
+ old_room_id (unicode): the id of the room to be replaced
+ new_version (unicode): the new room version to use
+
+ Returns:
+ Deferred[unicode]: the new room id
+ """
+ yield self.ratelimit(requester)
+
+ user_id = requester.user.to_string()
+
+ with (yield self._upgrade_linearizer.queue(old_room_id)):
+ # start by allocating a new room id
+ r = yield self.store.get_room(old_room_id)
+ if r is None:
+ raise NotFoundError("Unknown room id %s" % (old_room_id,))
+ new_room_id = yield self._generate_room_id(
+ creator_id=user_id, is_public=r["is_public"],
+ )
+
+ logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
+
+ # we create and auth the tombstone event before properly creating the new
+ # room, to check our user has perms in the old room.
+ tombstone_event, tombstone_context = (
+ yield self.event_creation_handler.create_event(
+ requester, {
+ "type": EventTypes.Tombstone,
+ "state_key": "",
+ "room_id": old_room_id,
+ "sender": user_id,
+ "content": {
+ "body": "This room has been replaced",
+ "replacement_room": new_room_id,
+ }
+ },
+ token_id=requester.access_token_id,
+ )
+ )
+ yield self.auth.check_from_context(tombstone_event, tombstone_context)
+
+ yield self.clone_existing_room(
+ requester,
+ old_room_id=old_room_id,
+ new_room_id=new_room_id,
+ new_room_version=new_version,
+ tombstone_event_id=tombstone_event.event_id,
+ )
+
+ # now send the tombstone
+ yield self.event_creation_handler.send_nonmember_event(
+ requester, tombstone_event, tombstone_context,
+ )
+
+ old_room_state = yield tombstone_context.get_current_state_ids(self.store)
+
+ # update any aliases
+ yield self._move_aliases_to_new_room(
+ requester, old_room_id, new_room_id, old_room_state,
+ )
+
+ # and finally, shut down the PLs in the old room, and update them in the new
+ # room.
+ yield self._update_upgraded_room_pls(
+ requester, old_room_id, new_room_id, old_room_state,
+ )
+
+ defer.returnValue(new_room_id)
+
+ @defer.inlineCallbacks
+ def _update_upgraded_room_pls(
+ self, requester, old_room_id, new_room_id, old_room_state,
+ ):
+ """Send updated power levels in both rooms after an upgrade
+
+ Args:
+ requester (synapse.types.Requester): the user requesting the upgrade
+ old_room_id (unicode): the id of the room to be replaced
+ new_room_id (unicode): the id of the replacement room
+ old_room_state (dict[tuple[str, str], str]): the state map for the old room
+
+ Returns:
+ Deferred
+ """
+ old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, ""))
+
+ if old_room_pl_event_id is None:
+ logger.warning(
+ "Not supported: upgrading a room with no PL event. Not setting PLs "
+ "in old room.",
+ )
+ return
+
+ old_room_pl_state = yield self.store.get_event(old_room_pl_event_id)
+
+ # we try to stop regular users from speaking by setting the PL required
+ # to send regular events and invites to 'Moderator' level. That's normally
+ # 50, but if the default PL in a room is 50 or more, then we set the
+ # required PL above that.
+
+ pl_content = dict(old_room_pl_state.content)
+ users_default = int(pl_content.get("users_default", 0))
+ restricted_level = max(users_default + 1, 50)
+
+ updated = False
+ for v in ("invite", "events_default"):
+ current = int(pl_content.get(v, 0))
+ if current < restricted_level:
+ logger.info(
+ "Setting level for %s in %s to %i (was %i)",
+ v, old_room_id, restricted_level, current,
+ )
+ pl_content[v] = restricted_level
+ updated = True
+ else:
+ logger.info(
+ "Not setting level for %s (already %i)",
+ v, current,
+ )
+
+ if updated:
+ try:
+ yield self.event_creation_handler.create_and_send_nonmember_event(
+ requester, {
+ "type": EventTypes.PowerLevels,
+ "state_key": '',
+ "room_id": old_room_id,
+ "sender": requester.user.to_string(),
+ "content": pl_content,
+ }, ratelimit=False,
+ )
+ except AuthError as e:
+ logger.warning("Unable to update PLs in old room: %s", e)
+
+ logger.info("Setting correct PLs in new room")
+ yield self.event_creation_handler.create_and_send_nonmember_event(
+ requester, {
+ "type": EventTypes.PowerLevels,
+ "state_key": '',
+ "room_id": new_room_id,
+ "sender": requester.user.to_string(),
+ "content": old_room_pl_state.content,
+ }, ratelimit=False,
+ )
+
+ @defer.inlineCallbacks
+ def clone_existing_room(
+ self, requester, old_room_id, new_room_id, new_room_version,
+ tombstone_event_id,
+ ):
+ """Populate a new room based on an old room
+
+ Args:
+ requester (synapse.types.Requester): the user requesting the upgrade
+ old_room_id (unicode): the id of the room to be replaced
+ new_room_id (unicode): the id to give the new room (should already have been
+ created with _generate_room_id())
+ new_room_version (unicode): the new room version to use
+ tombstone_event_id (unicode): the ID of the tombstone event in the old
+ room.
+ Returns:
+ Deferred[None]
+ """
+ user_id = requester.user.to_string()
+
+ if not self.spam_checker.user_may_create_room(user_id):
+ raise SynapseError(403, "You are not permitted to create rooms")
+
+ creation_content = {
+ "room_version": new_room_version,
+ "predecessor": {
+ "room_id": old_room_id,
+ "event_id": tombstone_event_id,
+ }
+ }
+
+ initial_state = dict()
+
+ types_to_copy = (
+ (EventTypes.JoinRules, ""),
+ (EventTypes.Name, ""),
+ (EventTypes.Topic, ""),
+ (EventTypes.RoomHistoryVisibility, ""),
+ (EventTypes.GuestAccess, ""),
+ (EventTypes.RoomAvatar, ""),
+ )
+
+ old_room_state_ids = yield self.store.get_filtered_current_state_ids(
+ old_room_id, StateFilter.from_types(types_to_copy),
+ )
+ # map from event_id to BaseEvent
+ old_room_state_events = yield self.store.get_events(old_room_state_ids.values())
+
+ for k, old_event_id in iteritems(old_room_state_ids):
+ old_event = old_room_state_events.get(old_event_id)
+ if old_event:
+ initial_state[k] = old_event.content
+
+ yield self._send_events_for_new_room(
+ requester,
+ new_room_id,
+
+ # we expect to override all the presets with initial_state, so this is
+ # somewhat arbitrary.
+ preset_config=RoomCreationPreset.PRIVATE_CHAT,
+
+ invite_list=[],
+ initial_state=initial_state,
+ creation_content=creation_content,
+ )
+
+ # XXX invites/joins
+ # XXX 3pid invites
+
+ @defer.inlineCallbacks
+ def _move_aliases_to_new_room(
+ self, requester, old_room_id, new_room_id, old_room_state,
+ ):
+ directory_handler = self.hs.get_handlers().directory_handler
+
+ aliases = yield self.store.get_aliases_for_room(old_room_id)
+
+ # check to see if we have a canonical alias.
+ canonical_alias = None
+ canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
+ if canonical_alias_event_id:
+ canonical_alias_event = yield self.store.get_event(canonical_alias_event_id)
+ if canonical_alias_event:
+ canonical_alias = canonical_alias_event.content.get("alias", "")
+
+ # first we try to remove the aliases from the old room (we suppress sending
+ # the room_aliases event until the end).
+ #
+ # Note that we'll only be able to remove aliases that (a) aren't owned by an AS,
+ # and (b) the user created (unless the user is a server admin).
+ #
+ # This is probably correct - given we don't allow such aliases to be deleted
+ # normally, it would be odd to allow it in the case of doing a room upgrade -
+ # but it makes the upgrade less effective, and you have to wonder why a room
+ # admin can't remove aliases that point to that room anyway.
+ # (cf https://github.com/matrix-org/synapse/issues/2360)
+ #
+ removed_aliases = []
+ for alias_str in aliases:
+ alias = RoomAlias.from_string(alias_str)
+ try:
+ yield directory_handler.delete_association(
+ requester, alias, send_event=False,
+ )
+ removed_aliases.append(alias_str)
+ except SynapseError as e:
+ logger.warning(
+ "Unable to remove alias %s from old room: %s",
+ alias, e,
+ )
+
+ # if we didn't find any aliases, or couldn't remove any, we can skip the rest
+ # of this.
+ if not removed_aliases:
+ return
+
+ try:
+ # this can fail if, for some reason, our user doesn't have perms to send
+ # m.room.aliases events in the old room (note that we've already checked that
+ # they have perms to send a tombstone event, so that's not terribly likely).
+ #
+ # If that happens, it's regrettable, but we should carry on: it's the same
+ # as when you remove an alias from the directory normally - it just means that
+ # the aliases event gets out of sync with the directory
+ # (cf https://github.com/vector-im/riot-web/issues/2369)
+ yield directory_handler.send_room_alias_update_event(
+ requester, old_room_id,
+ )
+ except AuthError as e:
+ logger.warning(
+ "Failed to send updated alias event on old room: %s", e,
+ )
+
+ # we can now add any aliases we successfully removed to the new room.
+ for alias in removed_aliases:
+ try:
+ yield directory_handler.create_association(
+ requester, RoomAlias.from_string(alias),
+ new_room_id, servers=(self.hs.hostname, ),
+ send_event=False,
+ )
+ logger.info("Moved alias %s to new room", alias)
+ except SynapseError as e:
+ # I'm not really expecting this to happen, but it could if the spam
+ # checking module decides it shouldn't, or similar.
+ logger.error(
+ "Error adding alias %s to new room: %s",
+ alias, e,
+ )
+
+ try:
+ if canonical_alias and (canonical_alias in removed_aliases):
+ yield self.event_creation_handler.create_and_send_nonmember_event(
+ requester,
+ {
+ "type": EventTypes.CanonicalAlias,
+ "state_key": "",
+ "room_id": new_room_id,
+ "sender": requester.user.to_string(),
+ "content": {"alias": canonical_alias, },
+ },
+ ratelimit=False
+ )
+
+ yield directory_handler.send_room_alias_update_event(
+ requester, new_room_id,
+ )
+ except SynapseError as e:
+ # again I'm not really expecting this to fail, but if it does, I'd rather
+ # we returned the new room to the client at this point.
+ logger.error(
+ "Unable to send updated alias events in new room: %s", e,
+ )
@defer.inlineCallbacks
def create_room(self, requester, config, ratelimit=True,
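A worked example of the power-level clamp in `_update_upgraded_room_pls`: the levels required for `invite` and `events_default` in the old room are raised to `max(users_default + 1, 50)`, so ordinary members can no longer speak or invite there. The numbers below are illustrative, not taken from a real room:

```python
# Worked example of the restricted-level computation used above.

def restrict_old_room_pls(pl_content):
    users_default = int(pl_content.get("users_default", 0))
    restricted_level = max(users_default + 1, 50)

    updated = dict(pl_content)
    for key in ("invite", "events_default"):
        if int(updated.get(key, 0)) < restricted_level:
            updated[key] = restricted_level
    return updated


# typical room: users_default 0, so anything below 50 gets raised to 50
print(sorted(restrict_old_room_pls(
    {"users_default": 0, "events_default": 0, "invite": 0}
).items()))
# [('events_default', 50), ('invite', 50), ('users_default', 0)]

# unusual room where ordinary users already have PL 60: raise to 61 instead
print(sorted(restrict_old_room_pls(
    {"users_default": 60, "events_default": 60, "invite": 60}
).items()))
# [('events_default', 61), ('invite', 61), ('users_default', 60)]
```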
@@ -165,28 +494,7 @@ class RoomCreationHandler(BaseHandler):
visibility = config.get("visibility", None)
is_public = visibility == "public"
- # autogen room IDs and try to create it. We may clash, so just
- # try a few times till one goes through, giving up eventually.
- attempts = 0
- room_id = None
- while attempts < 5:
- try:
- random_string = stringutils.random_string(18)
- gen_room_id = RoomID(
- random_string,
- self.hs.hostname,
- )
- yield self.store.store_room(
- room_id=gen_room_id.to_string(),
- room_creator_user_id=user_id,
- is_public=is_public
- )
- room_id = gen_room_id.to_string()
- break
- except StoreError:
- attempts += 1
- if not room_id:
- raise StoreError(500, "Couldn't generate a room ID.")
+ room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
if room_alias:
directory_handler = self.hs.get_handlers().directory_handler
@@ -216,18 +524,15 @@ class RoomCreationHandler(BaseHandler):
# override any attempt to set room versions via the creation_content
creation_content["room_version"] = room_version
- room_member_handler = self.hs.get_room_member_handler()
-
yield self._send_events_for_new_room(
requester,
room_id,
- room_member_handler,
preset_config=preset_config,
invite_list=invite_list,
initial_state=initial_state,
creation_content=creation_content,
room_alias=room_alias,
- power_level_content_override=config.get("power_level_content_override", {}),
+ power_level_content_override=config.get("power_level_content_override"),
creator_join_profile=creator_join_profile,
)
@@ -263,7 +568,7 @@ class RoomCreationHandler(BaseHandler):
if is_direct:
content["is_direct"] = is_direct
- yield room_member_handler.update_membership(
+ yield self.room_member_handler.update_membership(
requester,
UserID.from_string(invitee),
room_id,
@@ -301,14 +606,13 @@ class RoomCreationHandler(BaseHandler):
self,
creator, # A Requester object.
room_id,
- room_member_handler,
preset_config,
invite_list,
initial_state,
creation_content,
- room_alias,
- power_level_content_override,
- creator_join_profile,
+ room_alias=None,
+ power_level_content_override=None,
+ creator_join_profile=None,
):
def create(etype, content, **kwargs):
e = {
@@ -324,6 +628,7 @@ class RoomCreationHandler(BaseHandler):
@defer.inlineCallbacks
def send(etype, content, **kwargs):
event = create(etype, content, **kwargs)
+ logger.info("Sending %s in new room", etype)
yield self.event_creation_handler.create_and_send_nonmember_event(
creator,
event,
@@ -346,7 +651,8 @@ class RoomCreationHandler(BaseHandler):
content=creation_content,
)
- yield room_member_handler.update_membership(
+ logger.info("Sending %s in new room", EventTypes.Member)
+ yield self.room_member_handler.update_membership(
creator,
creator.user,
room_id,
@@ -388,7 +694,8 @@ class RoomCreationHandler(BaseHandler):
for invitee in invite_list:
power_level_content["users"][invitee] = 100
- power_level_content.update(power_level_content_override)
+ if power_level_content_override:
+ power_level_content.update(power_level_content_override)
yield send(
etype=EventTypes.PowerLevels,
@@ -427,6 +734,30 @@ class RoomCreationHandler(BaseHandler):
content=content,
)
+ @defer.inlineCallbacks
+ def _generate_room_id(self, creator_id, is_public):
+ # autogen room IDs and try to create it. We may clash, so just
+ # try a few times till one goes through, giving up eventually.
+ attempts = 0
+ while attempts < 5:
+ try:
+ random_string = stringutils.random_string(18)
+ gen_room_id = RoomID(
+ random_string,
+ self.hs.hostname,
+ ).to_string()
+ if isinstance(gen_room_id, bytes):
+ gen_room_id = gen_room_id.decode('utf-8')
+ yield self.store.store_room(
+ room_id=gen_room_id,
+ room_creator_user_id=creator_id,
+ is_public=is_public,
+ )
+ defer.returnValue(gen_room_id)
+ except StoreError:
+ attempts += 1
+ raise StoreError(500, "Couldn't generate a room ID.")
+
class RoomContextHandler(object):
def __init__(self, hs):
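As a usage sketch, `upgrade_room` would typically be driven from a REST servlet that is not part of this diff; the wrapper below and its response key are assumptions (the key simply mirrors the tombstone's `replacement_room` field), while the handler call itself is the one defined above:

```python
# Hypothetical driver for the upgrade_room entry point.

from twisted.internet import defer

from synapse.api.errors import SynapseError


@defer.inlineCallbacks
def handle_upgrade_request(room_creation_handler, requester, room_id, body):
    new_version = body.get("new_version")
    if not new_version:
        raise SynapseError(400, "'new_version' is required")

    new_room_id = yield room_creation_handler.upgrade_room(
        requester, room_id, new_version,
    )
    defer.returnValue({"replacement_room": new_room_id})
```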
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index c610933dd4..a61bbf9392 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -63,11 +63,8 @@ class TypingHandler(object):
self._member_typing_until = {} # clock time we expect to stop
self._member_last_federation_poke = {}
- # map room IDs to serial numbers
- self._room_serials = {}
self._latest_room_serial = 0
- # map room IDs to sets of users currently typing
- self._room_typing = {}
+ self._reset()
# caches which room_ids changed at which serials
self._typing_stream_change_cache = StreamChangeCache(
@@ -79,6 +76,15 @@ class TypingHandler(object):
5000,
)
+ def _reset(self):
+ """
+ Reset the typing handler's data caches.
+ """
+ # map room IDs to serial numbers
+ self._room_serials = {}
+ # map room IDs to sets of users currently typing
+ self._room_typing = {}
+
def _handle_timeouts(self):
logger.info("Checking for typing timeouts")
|