diff --git a/CHANGES.md b/CHANGES.md
index df01178971..a35f5aebc7 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,12 @@
+Synapse 0.33.3 (2018-08-22)
+===========================
+
+Bugfixes
+--------
+
+- Fix bug introduced in v0.33.3rc1 which made the ToS give a 500 error ([\#3732](https://github.com/matrix-org/synapse/issues/3732))
+
+
Synapse 0.33.3rc2 (2018-08-21)
==============================
@@ -13,7 +22,7 @@ Synapse 0.33.3rc1 (2018-08-21)
Features
--------
-- Add support for the SNI extension to federation TLS connections ([\#1491](https://github.com/matrix-org/synapse/issues/1491))
+- Add support for the SNI extension to federation TLS connections. Thanks to @vojeroen! ([\#3439](https://github.com/matrix-org/synapse/issues/3439))
- Add /_media/r0/config ([\#3184](https://github.com/matrix-org/synapse/issues/3184))
- speed up /members API and add `at` and `membership` params as per MSC1227 ([\#3568](https://github.com/matrix-org/synapse/issues/3568))
- implement `summary` block in /sync response as per MSC688 ([\#3574](https://github.com/matrix-org/synapse/issues/3574))
@@ -97,7 +106,7 @@ Features
Bugfixes
--------
-- Make /directory/list API return 404 for room not found instead of 400 ([\#2952](https://github.com/matrix-org/synapse/issues/2952))
+- Make /directory/list API return 404 for room not found instead of 400. Thanks to @fuzzmz! ([\#3620](https://github.com/matrix-org/synapse/issues/3620))
- Default inviter_display_name to mxid for email invites ([\#3391](https://github.com/matrix-org/synapse/issues/3391))
- Don't generate TURN credentials if no TURN config options are set ([\#3514](https://github.com/matrix-org/synapse/issues/3514))
- Correctly announce deleted devices over federation ([\#3520](https://github.com/matrix-org/synapse/issues/3520))
diff --git a/changelog.d/3659.feature b/changelog.d/3659.feature
new file mode 100644
index 0000000000..a5b4821c09
--- /dev/null
+++ b/changelog.d/3659.feature
@@ -0,0 +1 @@
+Support profile API endpoints on workers
diff --git a/changelog.d/3673.misc b/changelog.d/3673.misc
new file mode 100644
index 0000000000..d672111fb9
--- /dev/null
+++ b/changelog.d/3673.misc
@@ -0,0 +1 @@
+Refactor state module to support multiple room versions
diff --git a/changelog.d/3680.feature b/changelog.d/3680.feature
new file mode 100644
index 0000000000..4edaaf76a8
--- /dev/null
+++ b/changelog.d/3680.feature
@@ -0,0 +1 @@
+Server notices for resource limit blocking
diff --git a/changelog.d/3724.feature b/changelog.d/3724.feature
new file mode 100644
index 0000000000..1b374ccf47
--- /dev/null
+++ b/changelog.d/3724.feature
@@ -0,0 +1 @@
+Allow guests to use /rooms/:roomId/event/:eventId
diff --git a/changelog.d/3735.misc b/changelog.d/3735.misc
new file mode 100644
index 0000000000..f17004be71
--- /dev/null
+++ b/changelog.d/3735.misc
@@ -0,0 +1 @@
+Fix minor spelling error in federation client documentation.
diff --git a/changelog.d/3746.misc b/changelog.d/3746.misc
new file mode 100644
index 0000000000..fc00ee773a
--- /dev/null
+++ b/changelog.d/3746.misc
@@ -0,0 +1 @@
+Fix MAU cache invalidation due to missing yield
diff --git a/docs/workers.rst b/docs/workers.rst
index 5435f36331..101e950020 100644
--- a/docs/workers.rst
+++ b/docs/workers.rst
@@ -265,6 +265,7 @@ Handles some event creation. It can handle REST endpoints matching::
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
^/_matrix/client/(api/v1|r0|unstable)/join/
+ ^/_matrix/client/(api/v1|r0|unstable)/profile/
It will create events locally and then send them on to the main synapse
instance to be persisted and handled.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 252c49ca82..e62901b761 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -17,4 +17,4 @@
""" This is a reference implementation of a Matrix home server.
"""
-__version__ = "0.33.3rc2"
+__version__ = "0.33.3"
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 6502a6be7b..4ca40a0f71 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -783,10 +783,16 @@ class Auth(object):
user_id(str|None): If present, checks for presence against existing
MAU cohort
"""
+
+        # Never fail an auth check for the server notices user; otherwise
+        # blocking would also prevent the notice explaining the block.
+ if user_id == self.hs.config.server_notices_mxid:
+ return
+
if self.hs.config.hs_disabled:
raise ResourceLimitError(
403, self.hs.config.hs_disabled_message,
- errcode=Codes.RESOURCE_LIMIT_EXCEED,
+ errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
admin_uri=self.hs.config.admin_uri,
limit_type=self.hs.config.hs_disabled_limit_type
)
@@ -803,6 +809,6 @@ class Auth(object):
403, "Monthly Active User Limit Exceeded",
admin_uri=self.hs.config.admin_uri,
- errcode=Codes.RESOURCE_LIMIT_EXCEED,
+ errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
limit_type="monthly_active_user"
)
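
The auth change above exempts the configured server notices user from check_auth_blocking: that user must still be able to create the event that tells everyone else why they are blocked. A standalone sketch of the same guard, using simplified stand-ins for Synapse's config and error types (nothing below is the real API):

    # Illustrative sketch only; the classes here are stand-ins, not Synapse's.
    class ResourceLimitError(Exception):
        def __init__(self, code, msg, errcode="M_RESOURCE_LIMIT_EXCEEDED"):
            super(ResourceLimitError, self).__init__(msg)
            self.code = code
            self.errcode = errcode

    class FakeConfig(object):
        server_notices_mxid = "@notices:example.org"  # hypothetical notices user
        hs_disabled = True
        hs_disabled_message = "Server has exceeded its usage limit"

    def check_auth_blocking(config, user_id=None):
        # The exempt user must be checked before any blocking condition,
        # otherwise the server could never deliver the notice explaining it.
        if user_id == config.server_notices_mxid:
            return
        if config.hs_disabled:
            raise ResourceLimitError(403, config.hs_disabled_message)

    check_auth_blocking(FakeConfig(), "@notices:example.org")  # exempt: no error
    try:
        check_auth_blocking(FakeConfig(), "@alice:example.org")
    except ResourceLimitError as e:
        print(e.code, e.errcode)  # 403 M_RESOURCE_LIMIT_EXCEEDED
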
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index b0da506f6d..c2630c4c64 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -78,6 +78,7 @@ class EventTypes(object):
Name = "m.room.name"
ServerACL = "m.room.server_acl"
+ Pinned = "m.room.pinned_events"
class RejectedReason(object):
@@ -97,9 +98,17 @@ class ThirdPartyEntityKind(object):
LOCATION = "location"
+class RoomVersions(object):
+ V1 = "1"
+ VDH_TEST = "vdh-test-version"
+
+
# the version we will give rooms which are created on this server
-DEFAULT_ROOM_VERSION = "1"
+DEFAULT_ROOM_VERSION = RoomVersions.V1
# vdh-test-version is a placeholder to get room versioning support working and tested
# until we have a working v2.
-KNOWN_ROOM_VERSIONS = {"1", "vdh-test-version"}
+KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST}
+
+ServerNoticeMsgType = "m.server_notice"
+ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index e26001ab12..c4ddba9889 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -56,7 +56,7 @@ class Codes(object):
SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"
CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM"
- RESOURCE_LIMIT_EXCEED = "M_RESOURCE_LIMIT_EXCEED"
+ RESOURCE_LIMIT_EXCEEDED = "M_RESOURCE_LIMIT_EXCEEDED"
UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION"
INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
@@ -238,7 +238,7 @@ class ResourceLimitError(SynapseError):
"""
def __init__(
self, code, msg,
- errcode=Codes.RESOURCE_LIMIT_EXCEED,
+ errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
admin_uri=None,
limit_type=None,
):
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index 03d39968a8..a34c89fa99 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -45,6 +45,11 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1.profile import (
+ ProfileAvatarURLRestServlet,
+ ProfileDisplaynameRestServlet,
+ ProfileRestServlet,
+)
from synapse.rest.client.v1.room import (
JoinRoomAliasServlet,
RoomMembershipRestServlet,
@@ -53,6 +58,7 @@ from synapse.rest.client.v1.room import (
)
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
+from synapse.storage.user_directory import UserDirectoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
@@ -62,6 +68,9 @@ logger = logging.getLogger("synapse.app.event_creator")
class EventCreatorSlavedStore(
+ # FIXME(#3714): We need to add UserDirectoryStore as we write directly
+ # rather than going via the correct worker.
+ UserDirectoryStore,
DirectoryStore,
SlavedTransactionStore,
SlavedProfileStore,
@@ -101,6 +110,9 @@ class EventCreatorServer(HomeServer):
RoomMembershipRestServlet(self).register(resource)
RoomStateEventRestServlet(self).register(resource)
JoinRoomAliasServlet(self).register(resource)
+ ProfileAvatarURLRestServlet(self).register(resource)
+ ProfileDisplaynameRestServlet(self).register(resource)
+ ProfileRestServlet(self).register(resource)
resources.update({
"/_matrix/client/r0": resource,
"/_matrix/client/unstable": resource,
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index b4fbe2c9d5..1054441ca5 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -106,7 +106,7 @@ class TransportLayerClient(object):
dest (str)
room_id (str)
event_tuples (list)
- limt (int)
+ limit (int)
Returns:
Deferred: Results in a dict received from the remote homeserver.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 3dd107a285..0ebf0fd188 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -291,8 +291,9 @@ class FederationHandler(BaseHandler):
ev_ids, get_prev_content=False, check_redacted=False
)
+ room_version = yield self.store.get_room_version(pdu.room_id)
state_map = yield resolve_events_with_factory(
- state_groups, {pdu.event_id: pdu}, fetch
+ room_version, state_groups, {pdu.event_id: pdu}, fetch
)
state = (yield self.store.get_events(state_map.values())).values()
@@ -1828,7 +1829,10 @@ class FederationHandler(BaseHandler):
(d.type, d.state_key): d for d in different_events if d
})
+ room_version = yield self.store.get_room_version(event.room_id)
+
new_state = self.state_handler.resolve_events(
+ room_version,
[list(local_view.values()), list(remote_view.values())],
event
)
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 9af2e8f869..75b8b7ce6a 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -32,12 +32,16 @@ from ._base import BaseHandler
logger = logging.getLogger(__name__)
-class ProfileHandler(BaseHandler):
- PROFILE_UPDATE_MS = 60 * 1000
- PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000
+class BaseProfileHandler(BaseHandler):
+ """Handles fetching and updating user profile information.
+
+ BaseProfileHandler can be instantiated directly on workers and will
+ delegate to master when necessary. The master process should use the
+ subclass MasterProfileHandler
+ """
def __init__(self, hs):
- super(ProfileHandler, self).__init__(hs)
+ super(BaseProfileHandler, self).__init__(hs)
self.federation = hs.get_federation_client()
hs.get_federation_registry().register_query_handler(
@@ -46,11 +50,6 @@ class ProfileHandler(BaseHandler):
self.user_directory_handler = hs.get_user_directory_handler()
- if hs.config.worker_app is None:
- self.clock.looping_call(
- self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS,
- )
-
@defer.inlineCallbacks
def get_profile(self, user_id):
target_user = UserID.from_string(user_id)
@@ -282,6 +281,20 @@ class ProfileHandler(BaseHandler):
room_id, str(e.message)
)
+
+class MasterProfileHandler(BaseProfileHandler):
+ PROFILE_UPDATE_MS = 60 * 1000
+ PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000
+
+ def __init__(self, hs):
+ super(MasterProfileHandler, self).__init__(hs)
+
+ assert hs.config.worker_app is None
+
+ self.clock.looping_call(
+ self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS,
+ )
+
def _start_update_remote_profile_cache(self):
return run_as_background_process(
"Update remote profile", self._update_remote_profile_cache,
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index fb94b5d7d4..f643619047 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -344,6 +344,7 @@ class RoomMemberHandler(object):
latest_event_ids = (
event_id for (event_id, _, _) in prev_events_and_hashes
)
+
current_state_ids = yield self.state_handler.get_current_state_ids(
room_id, latest_event_ids=latest_event_ids,
)
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 37dda64587..d8413d6aa7 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -119,6 +119,8 @@ class UserDirectoryHandler(object):
"""Called to update index of our local user profiles when they change
irrespective of any rooms the user may be in.
"""
+ # FIXME(#3714): We should probably do this in the same worker as all
+ # the other changes.
yield self.store.update_profile_in_user_dir(
user_id, profile.display_name, profile.avatar_url, None,
)
@@ -127,6 +129,8 @@ class UserDirectoryHandler(object):
def handle_user_deactivated(self, user_id):
"""Called when a user ID is deactivated
"""
+ # FIXME(#3714): We should probably do this in the same worker as all
+ # the other changes.
yield self.store.remove_from_user_dir(user_id)
yield self.store.remove_from_user_in_public_room(user_id)
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index fcc1091760..976d98387d 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -531,7 +531,7 @@ class RoomEventServlet(ClientV1RestServlet):
@defer.inlineCallbacks
def on_GET(self, request, room_id, event_id):
- requester = yield self.auth.get_user_by_req(request)
+ requester = yield self.auth.get_user_by_req(request, allow_guest=True)
event = yield self.event_handler.get_event(requester.user, room_id, event_id)
time_now = self.clock.time_msec()
diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py
index 147ff7d79b..7362e1858d 100644
--- a/synapse/rest/consent/consent_resource.py
+++ b/synapse/rest/consent/consent_resource.py
@@ -140,7 +140,7 @@ class ConsentResource(Resource):
version = parse_string(request, "v",
default=self._default_consent_version)
username = parse_string(request, "u", required=True)
- userhmac = parse_string(request, "h", required=True)
+ userhmac = parse_string(request, "h", required=True, encoding=None)
self._check_hash(username, userhmac)
@@ -175,7 +175,7 @@ class ConsentResource(Resource):
"""
version = parse_string(request, "v", required=True)
username = parse_string(request, "u", required=True)
- userhmac = parse_string(request, "h", required=True)
+ userhmac = parse_string(request, "h", required=True, encoding=None)
self._check_hash(username, userhmac)
@@ -210,9 +210,18 @@ class ConsentResource(Resource):
finish_request(request)
def _check_hash(self, userid, userhmac):
+ """
+ Args:
+ userid (unicode):
+ userhmac (bytes):
+
+ Raises:
+ SynapseError if the hash doesn't match
+
+ """
want_mac = hmac.new(
key=self._hmac_secret,
- msg=userid,
+ msg=userid.encode('utf-8'),
digestmod=sha256,
).hexdigest()
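
The consent resource now reads the h parameter as raw bytes (encoding=None) and encodes the user id to UTF-8 before hashing, because hmac.new needs bytes on Python 3. A minimal sketch of computing and checking such a MAC; the constant-time comparison shown here is illustrative, since the actual comparison code is outside this hunk:

    import hmac
    from hashlib import sha256

    def make_userhmac(hmac_secret, userid):
        # Both the key and the message must be bytes, hence the explicit encode.
        return hmac.new(
            key=hmac_secret,
            msg=userid.encode("utf-8"),
            digestmod=sha256,
        ).hexdigest().encode("ascii")

    def check_hash(hmac_secret, userid, userhmac):
        # compare_digest avoids leaking information through comparison timing.
        return hmac.compare_digest(make_userhmac(hmac_secret, userid), userhmac)

    secret = b"form_secret"
    mac = make_userhmac(secret, "@alice:example.org")
    assert check_hash(secret, "@alice:example.org", mac)
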
diff --git a/synapse/server.py b/synapse/server.py
index 26228d8c72..a6fbc6ec0c 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -56,7 +56,7 @@ from synapse.handlers.initial_sync import InitialSyncHandler
from synapse.handlers.message import EventCreationHandler, MessageHandler
from synapse.handlers.pagination import PaginationHandler
from synapse.handlers.presence import PresenceHandler
-from synapse.handlers.profile import ProfileHandler
+from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler
from synapse.handlers.read_marker import ReadMarkerHandler
from synapse.handlers.receipts import ReceiptsHandler
from synapse.handlers.room import RoomContextHandler, RoomCreationHandler
@@ -308,7 +308,10 @@ class HomeServer(object):
return InitialSyncHandler(self)
def build_profile_handler(self):
- return ProfileHandler(self)
+ if self.config.worker_app:
+ return BaseProfileHandler(self)
+ else:
+ return MasterProfileHandler(self)
def build_event_creation_handler(self):
return EventCreationHandler(self)
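
build_profile_handler now returns a different handler class depending on whether the process is a worker, so only the master schedules the remote profile cache update loop. A stripped-down sketch of that pattern with stand-in homeserver objects (not Synapse's real classes):

    # Illustrative stand-ins; not Synapse's real handler or homeserver classes.
    from types import SimpleNamespace

    class BaseProfileHandler(object):
        """Safe to instantiate on any process; delegates to the master as needed."""
        def __init__(self, hs):
            self.hs = hs

    class MasterProfileHandler(BaseProfileHandler):
        """Only the master schedules the periodic remote-profile-cache update."""
        PROFILE_UPDATE_MS = 60 * 1000

        def __init__(self, hs):
            super(MasterProfileHandler, self).__init__(hs)
            assert hs.config.worker_app is None
            hs.clock.looping_call(self._update_cache, self.PROFILE_UPDATE_MS)

        def _update_cache(self):
            pass  # placeholder for the real background update

    def build_profile_handler(hs):
        # Workers get the base class; the master gets the looping-call subclass.
        if hs.config.worker_app:
            return BaseProfileHandler(hs)
        return MasterProfileHandler(hs)

    master = SimpleNamespace(
        config=SimpleNamespace(worker_app=None),
        clock=SimpleNamespace(looping_call=lambda f, ms: None),
    )
    worker = SimpleNamespace(config=SimpleNamespace(worker_app="synapse.app.event_creator"))

    assert type(build_profile_handler(master)) is MasterProfileHandler
    assert type(build_profile_handler(worker)) is BaseProfileHandler
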
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
new file mode 100644
index 0000000000..575697e54b
--- /dev/null
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -0,0 +1,191 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from six import iteritems
+
+from twisted.internet import defer
+
+from synapse.api.constants import (
+ EventTypes,
+ ServerNoticeLimitReached,
+ ServerNoticeMsgType,
+)
+from synapse.api.errors import AuthError, ResourceLimitError, SynapseError
+from synapse.server_notices.server_notices_manager import SERVER_NOTICE_ROOM_TAG
+
+logger = logging.getLogger(__name__)
+
+
+class ResourceLimitsServerNotices(object):
+    """ Keeps track of whether the server has reached its resource limit and
+ ensures that the client is kept up to date.
+ """
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer):
+ """
+ self._server_notices_manager = hs.get_server_notices_manager()
+ self._store = hs.get_datastore()
+ self._auth = hs.get_auth()
+ self._config = hs.config
+        self._resource_limited = False
+ self._message_handler = hs.get_message_handler()
+ self._state = hs.get_state_handler()
+
+ @defer.inlineCallbacks
+ def maybe_send_server_notice_to_user(self, user_id):
+        """Check if we need to send a notice to this user. This will be true
+        in two cases:
+        1. The server has reached its limit but the room state does not
+           reflect this.
+        2. The room state says the limit is reached when the server is fine.
+
+ Args:
+ user_id (str): user to check
+
+ Returns:
+ Deferred
+ """
+ if self._config.hs_disabled is True:
+ return
+
+ if self._config.limit_usage_by_mau is False:
+ return
+
+ timestamp = yield self._store.user_last_seen_monthly_active(user_id)
+ if timestamp is None:
+ # This user will be blocked from receiving the notice anyway.
+ # In practice, not sure we can ever get here
+ return
+
+ # Determine current state of room
+
+ room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id)
+
+ yield self._check_and_set_tags(user_id, room_id)
+ currently_blocked, ref_events = yield self._is_room_currently_blocked(room_id)
+
+ try:
+ # Normally should always pass in user_id if you have it, but in
+ # this case are checking what would happen to other users if they
+ # were to arrive.
+ try:
+ yield self._auth.check_auth_blocking()
+ is_auth_blocking = False
+ except ResourceLimitError as e:
+ is_auth_blocking = True
+ event_content = e.msg
+ event_limit_type = e.limit_type
+
+ if currently_blocked and not is_auth_blocking:
+ # Room is notifying of a block, when it ought not to be.
+ # Remove block notification
+ content = {
+ "pinned": ref_events
+ }
+ yield self._server_notices_manager.send_notice(
+ user_id, content, EventTypes.Pinned, '',
+ )
+
+ elif not currently_blocked and is_auth_blocking:
+ # Room is not notifying of a block, when it ought to be.
+ # Add block notification
+ content = {
+ 'body': event_content,
+ 'msgtype': ServerNoticeMsgType,
+ 'server_notice_type': ServerNoticeLimitReached,
+ 'admin_uri': self._config.admin_uri,
+ 'limit_type': event_limit_type
+ }
+ event = yield self._server_notices_manager.send_notice(
+ user_id, content, EventTypes.Message,
+ )
+
+ content = {
+ "pinned": [
+ event.event_id,
+ ]
+ }
+ yield self._server_notices_manager.send_notice(
+ user_id, content, EventTypes.Pinned, '',
+ )
+
+ except SynapseError as e:
+ logger.error("Error sending resource limits server notice: %s", e)
+
+ @defer.inlineCallbacks
+ def _check_and_set_tags(self, user_id, room_id):
+ """
+        Since server notices rooms were originally created without tags, it is
+        important to check that the tag has been set correctly.
+ Args:
+ user_id(str): the user in question
+ room_id(str): the server notices room for that user
+ """
+ tags = yield self._store.get_tags_for_user(user_id)
+ server_notices_tags = tags.get(room_id)
+ need_to_set_tag = True
+ if server_notices_tags:
+ if server_notices_tags.get(SERVER_NOTICE_ROOM_TAG):
+ # tag already present, nothing to do here
+ need_to_set_tag = False
+ if need_to_set_tag:
+ yield self._store.add_tag_to_room(
+ user_id, room_id, SERVER_NOTICE_ROOM_TAG, None
+ )
+
+ @defer.inlineCallbacks
+ def _is_room_currently_blocked(self, room_id):
+ """
+ Determines if the room is currently blocked
+
+ Args:
+ room_id(str): The room id of the server notices room
+
+        Returns:
+            Deferred[tuple[bool, list]]:
+                bool: Is the room currently blocked
+                list: The list of pinned events that are unrelated to limit
+                    blocking. This list can be used as a convenience in the
+                    case where the block is to be lifted and the remaining
+                    pinned event references need to be preserved.
+ """
+ currently_blocked = False
+ pinned_state_event = None
+ try:
+ pinned_state_event = yield self._state.get_current_state(
+ room_id, event_type=EventTypes.Pinned
+ )
+ except AuthError:
+ # The user has yet to join the server notices room
+ pass
+
+ referenced_events = []
+ if pinned_state_event is not None:
+ referenced_events = pinned_state_event.content.get('pinned')
+
+ events = yield self._store.get_events(referenced_events)
+ for event_id, event in iteritems(events):
+ if event.type != EventTypes.Message:
+ continue
+ if event.content.get("msgtype") == ServerNoticeMsgType:
+ currently_blocked = True
+ # remove event in case we need to disable blocking later on.
+ if event_id in referenced_events:
+ referenced_events.remove(event.event_id)
+
+ defer.returnValue((currently_blocked, referenced_events))
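
maybe_send_server_notice_to_user above reconciles two booleans: whether auth is actually blocking, and whether the server notices room already advertises a block through its pinned events. A tiny pure-function sketch of that decision table (the real code sends and pins events; this only names the action to take):

    def reconcile_block_notice(currently_blocked, is_auth_blocking):
        """Return the action the notices room needs: 'remove_notice',
        'add_notice' or 'noop'."""
        if currently_blocked and not is_auth_blocking:
            # Room says "blocked" but the server is fine: unpin the notice.
            return "remove_notice"
        if not currently_blocked and is_auth_blocking:
            # Server is blocking but the room does not say so yet:
            # send a notice and pin it.
            return "add_notice"
        return "noop"

    assert reconcile_block_notice(True, False) == "remove_notice"
    assert reconcile_block_notice(False, True) == "add_notice"
    assert reconcile_block_notice(True, True) == "noop"
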
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index a26deace53..5968104a99 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -22,6 +22,8 @@ from synapse.util.caches.descriptors import cachedInlineCallbacks
logger = logging.getLogger(__name__)
+SERVER_NOTICE_ROOM_TAG = "m.server_notice"
+
class ServerNoticesManager(object):
def __init__(self, hs):
@@ -46,7 +48,10 @@ class ServerNoticesManager(object):
return self._config.server_notices_mxid is not None
@defer.inlineCallbacks
- def send_notice(self, user_id, event_content):
+ def send_notice(
+ self, user_id, event_content,
+ type=EventTypes.Message, state_key=None
+ ):
"""Send a notice to the given user
Creates the server notices room, if none exists.
@@ -54,9 +59,11 @@ class ServerNoticesManager(object):
Args:
user_id (str): mxid of user to send event to.
event_content (dict): content of event to send
+            type (str): type of event to send
+            state_key (str|None): state key, or None for a non-state event
Returns:
- Deferred[None]
+ Deferred[FrozenEvent]
"""
room_id = yield self.get_notice_room_for_user(user_id)
@@ -65,15 +72,20 @@ class ServerNoticesManager(object):
logger.info("Sending server notice to %s", user_id)
- yield self._event_creation_handler.create_and_send_nonmember_event(
- requester, {
- "type": EventTypes.Message,
- "room_id": room_id,
- "sender": system_mxid,
- "content": event_content,
- },
- ratelimit=False,
+ event_dict = {
+ "type": type,
+ "room_id": room_id,
+ "sender": system_mxid,
+ "content": event_content,
+ }
+
+ if state_key is not None:
+ event_dict['state_key'] = state_key
+
+ res = yield self._event_creation_handler.create_and_send_nonmember_event(
+ requester, event_dict, ratelimit=False,
)
+ defer.returnValue(res)
@cachedInlineCallbacks()
def get_notice_room_for_user(self, user_id):
@@ -141,6 +153,9 @@ class ServerNoticesManager(object):
creator_join_profile=join_profile,
)
room_id = info['room_id']
+ yield self._store.add_tag_to_room(
+ user_id, room_id, SERVER_NOTICE_ROOM_TAG, None
+ )
logger.info("Created server notices room %s for %s", room_id, user_id)
defer.returnValue(room_id)
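
send_notice now takes the event type and an optional state_key, so the same helper can send ordinary m.room.message notices as well as the m.room.pinned_events state event used above. A small sketch of how the event dict is assembled (plain dicts, no Synapse types):

    def build_notice_event(room_id, sender, content,
                           type="m.room.message", state_key=None):
        # "type" mirrors the kwarg name in the diff; it shadows the builtin
        # only inside this function.
        event = {
            "type": type,
            "room_id": room_id,
            "sender": sender,
            "content": content,
        }
        # Only state events carry a state_key; plain messages must omit it.
        if state_key is not None:
            event["state_key"] = state_key
        return event

    msg = build_notice_event("!room:example.org", "@notices:example.org",
                             {"body": "over the limit"})
    pin = build_notice_event("!room:example.org", "@notices:example.org",
                             {"pinned": ["$event_id"]},
                             type="m.room.pinned_events", state_key="")
    assert "state_key" not in msg and pin["state_key"] == ""
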
diff --git a/synapse/server_notices/server_notices_sender.py b/synapse/server_notices/server_notices_sender.py
index 5d23965f34..6121b2f267 100644
--- a/synapse/server_notices/server_notices_sender.py
+++ b/synapse/server_notices/server_notices_sender.py
@@ -12,7 +12,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from twisted.internet import defer
+
from synapse.server_notices.consent_server_notices import ConsentServerNotices
+from synapse.server_notices.resource_limits_server_notices import (
+ ResourceLimitsServerNotices,
+)
class ServerNoticesSender(object):
@@ -25,34 +30,34 @@ class ServerNoticesSender(object):
Args:
hs (synapse.server.HomeServer):
"""
- # todo: it would be nice to make this more dynamic
- self._consent_server_notices = ConsentServerNotices(hs)
+ self._server_notices = (
+ ConsentServerNotices(hs),
+ ResourceLimitsServerNotices(hs)
+ )
+ @defer.inlineCallbacks
def on_user_syncing(self, user_id):
"""Called when the user performs a sync operation.
Args:
user_id (str): mxid of user who synced
-
- Returns:
- Deferred
"""
- return self._consent_server_notices.maybe_send_server_notice_to_user(
- user_id,
- )
+ for sn in self._server_notices:
+ yield sn.maybe_send_server_notice_to_user(
+ user_id,
+ )
+ @defer.inlineCallbacks
def on_user_ip(self, user_id):
"""Called on the master when a worker process saw a client request.
Args:
user_id (str): mxid
-
- Returns:
- Deferred
"""
# The synchrotrons use a stubbed version of ServerNoticesSender, so
# we check for notices to send to the user in on_user_ip as well as
# in on_user_syncing
- return self._consent_server_notices.maybe_send_server_notice_to_user(
- user_id,
- )
+ for sn in self._server_notices:
+ yield sn.maybe_send_server_notice_to_user(
+ user_id,
+ )
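
ServerNoticesSender now fans each sync or user-ip event out to every registered notice generator, yielding them one at a time. A minimal Twisted sketch of the same fan-out with stub notice classes (requires Twisted, which Synapse already depends on):

    # Illustrative sketch; the notice classes below are stubs.
    from twisted.internet import defer

    class StubNotices(object):
        def __init__(self, name, log):
            self.name, self.log = name, log

        def maybe_send_server_notice_to_user(self, user_id):
            self.log.append((self.name, user_id))
            return defer.succeed(None)

    class Sender(object):
        def __init__(self, notices):
            self._server_notices = tuple(notices)

        @defer.inlineCallbacks
        def on_user_syncing(self, user_id):
            # Each generator is awaited in turn; the overall Deferred fires
            # only once every generator has had a chance to send its notice.
            for sn in self._server_notices:
                yield sn.maybe_send_server_notice_to_user(user_id)

    log = []
    sender = Sender([StubNotices("consent", log), StubNotices("limits", log)])
    sender.on_user_syncing("@alice:example.org")
    assert log == [("consent", "@alice:example.org"),
                   ("limits", "@alice:example.org")]
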
diff --git a/synapse/state.py b/synapse/state/__init__.py
index 0f2bedb694..b34970e4d1 100644
--- a/synapse/state.py
+++ b/synapse/state/__init__.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,21 +14,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-import hashlib
import logging
from collections import namedtuple
-from six import iteritems, iterkeys, itervalues
+from six import iteritems, itervalues
from frozendict import frozendict
from twisted.internet import defer
-from synapse import event_auth
-from synapse.api.constants import EventTypes
-from synapse.api.errors import AuthError
+from synapse.api.constants import EventTypes, RoomVersions
from synapse.events.snapshot import EventContext
+from synapse.state import v1
from synapse.util.async_helpers import Linearizer
from synapse.util.caches import get_cache_factor_for
from synapse.util.caches.expiringcache import ExpiringCache
@@ -264,6 +262,7 @@ class StateHandler(object):
defer.returnValue(context)
logger.debug("calling resolve_state_groups from compute_event_context")
+
entry = yield self.resolve_state_groups_for_events(
event.room_id, [e for e, _ in event.prev_events],
)
@@ -338,8 +337,11 @@ class StateHandler(object):
event, resolves conflicts between them and returns them.
Args:
- room_id (str):
- event_ids (list[str]):
+ room_id (str)
+ event_ids (list[str])
+            explicit_room_version (str|None): If set, uses the given room
+                version to choose the resolution algorithm. If None, then
+                checks the database for the room version.
Returns:
Deferred[_StateCacheEntry]: resolved state
@@ -353,7 +355,12 @@ class StateHandler(object):
room_id, event_ids
)
- if len(state_groups_ids) == 1:
+ if len(state_groups_ids) == 0:
+ defer.returnValue(_StateCacheEntry(
+ state={},
+ state_group=None,
+ ))
+ elif len(state_groups_ids) == 1:
name, state_list = list(state_groups_ids.items()).pop()
prev_group, delta_ids = yield self.store.get_state_group_delta(name)
@@ -365,8 +372,11 @@ class StateHandler(object):
delta_ids=delta_ids,
))
+ room_version = yield self.store.get_room_version(room_id)
+
result = yield self._state_resolution_handler.resolve_state_groups(
- room_id, state_groups_ids, None, self._state_map_factory,
+ room_id, room_version, state_groups_ids, None,
+ self._state_map_factory,
)
defer.returnValue(result)
@@ -375,7 +385,7 @@ class StateHandler(object):
ev_ids, get_prev_content=False, check_redacted=False,
)
- def resolve_events(self, state_sets, event):
+ def resolve_events(self, room_version, state_sets, event):
logger.info(
"Resolving state for %s with %d groups", event.room_id, len(state_sets)
)
@@ -391,7 +401,9 @@ class StateHandler(object):
}
with Measure(self.clock, "state._resolve_events"):
- new_state = resolve_events_with_state_map(state_set_ids, state_map)
+ new_state = resolve_events_with_state_map(
+ room_version, state_set_ids, state_map,
+ )
new_state = {
key: state_map[ev_id] for key, ev_id in iteritems(new_state)
@@ -430,7 +442,7 @@ class StateResolutionHandler(object):
@defer.inlineCallbacks
@log_function
def resolve_state_groups(
- self, room_id, state_groups_ids, event_map, state_map_factory,
+ self, room_id, room_version, state_groups_ids, event_map, state_map_factory,
):
"""Resolves conflicts between a set of state groups
@@ -439,6 +451,7 @@ class StateResolutionHandler(object):
Args:
room_id (str): room we are resolving for (used for logging)
+ room_version (str): version of the room
state_groups_ids (dict[int, dict[(str, str), str]]):
map from state group id to the state in that state group
(where 'state' is a map from state key to event id)
@@ -492,6 +505,7 @@ class StateResolutionHandler(object):
logger.info("Resolving conflicted state for %r", room_id)
with Measure(self.clock, "state._resolve_events"):
new_state = yield resolve_events_with_factory(
+ room_version,
list(itervalues(state_groups_ids)),
event_map=event_map,
state_map_factory=state_map_factory,
@@ -575,16 +589,10 @@ def _make_state_cache_entry(
)
-def _ordered_events(events):
- def key_func(e):
- return -int(e.depth), hashlib.sha1(e.event_id.encode('ascii')).hexdigest()
-
- return sorted(events, key=key_func)
-
-
-def resolve_events_with_state_map(state_sets, state_map):
+def resolve_events_with_state_map(room_version, state_sets, state_map):
"""
Args:
+ room_version(str): Version of the room
state_sets(list): List of dicts of (type, state_key) -> event_id,
which are the different state groups to resolve.
state_map(dict): a dict from event_id to event, for all events in
@@ -594,75 +602,23 @@ def resolve_events_with_state_map(state_sets, state_map):
dict[(str, str), str]:
a map from (type, state_key) to event_id.
"""
- if len(state_sets) == 1:
- return state_sets[0]
-
- unconflicted_state, conflicted_state = _seperate(
- state_sets,
- )
-
- auth_events = _create_auth_events_from_maps(
- unconflicted_state, conflicted_state, state_map
- )
-
- return _resolve_with_state(
- unconflicted_state, conflicted_state, auth_events, state_map
- )
-
-
-def _seperate(state_sets):
- """Takes the state_sets and figures out which keys are conflicted and
- which aren't. i.e., which have multiple different event_ids associated
- with them in different state sets.
-
- Args:
- state_sets(iterable[dict[(str, str), str]]):
- List of dicts of (type, state_key) -> event_id, which are the
- different state groups to resolve.
-
- Returns:
- (dict[(str, str), str], dict[(str, str), set[str]]):
- A tuple of (unconflicted_state, conflicted_state), where:
-
- unconflicted_state is a dict mapping (type, state_key)->event_id
- for unconflicted state keys.
-
- conflicted_state is a dict mapping (type, state_key) to a set of
- event ids for conflicted state keys.
- """
- state_set_iterator = iter(state_sets)
- unconflicted_state = dict(next(state_set_iterator))
- conflicted_state = {}
-
- for state_set in state_set_iterator:
- for key, value in iteritems(state_set):
- # Check if there is an unconflicted entry for the state key.
- unconflicted_value = unconflicted_state.get(key)
- if unconflicted_value is None:
- # There isn't an unconflicted entry so check if there is a
- # conflicted entry.
- ls = conflicted_state.get(key)
- if ls is None:
- # There wasn't a conflicted entry so haven't seen this key before.
- # Therefore it isn't conflicted yet.
- unconflicted_state[key] = value
- else:
- # This key is already conflicted, add our value to the conflict set.
- ls.add(value)
- elif unconflicted_value != value:
- # If the unconflicted value is not the same as our value then we
- # have a new conflict. So move the key from the unconflicted_state
- # to the conflicted state.
- conflicted_state[key] = {value, unconflicted_value}
- unconflicted_state.pop(key, None)
-
- return unconflicted_state, conflicted_state
+ if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,):
+ return v1.resolve_events_with_state_map(
+ state_sets, state_map,
+ )
+ else:
+ # This should only happen if we added a version but forgot to add it to
+ # the list above.
+ raise Exception(
+ "No state resolution algorithm defined for version %r" % (room_version,)
+ )
-@defer.inlineCallbacks
-def resolve_events_with_factory(state_sets, event_map, state_map_factory):
+def resolve_events_with_factory(room_version, state_sets, event_map, state_map_factory):
"""
Args:
+ room_version(str): Version of the room
+
state_sets(list): List of dicts of (type, state_key) -> event_id,
which are the different state groups to resolve.
@@ -682,185 +638,13 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory):
Deferred[dict[(str, str), str]]:
a map from (type, state_key) to event_id.
"""
- if len(state_sets) == 1:
- defer.returnValue(state_sets[0])
-
- unconflicted_state, conflicted_state = _seperate(
- state_sets,
- )
-
- needed_events = set(
- event_id
- for event_ids in itervalues(conflicted_state)
- for event_id in event_ids
- )
- if event_map is not None:
- needed_events -= set(iterkeys(event_map))
-
- logger.info("Asking for %d conflicted events", len(needed_events))
-
- # dict[str, FrozenEvent]: a map from state event id to event. Only includes
- # the state events which are in conflict (and those in event_map)
- state_map = yield state_map_factory(needed_events)
- if event_map is not None:
- state_map.update(event_map)
-
- # get the ids of the auth events which allow us to authenticate the
- # conflicted state, picking only from the unconflicting state.
- #
- # dict[(str, str), str]: a map from state key to event id
- auth_events = _create_auth_events_from_maps(
- unconflicted_state, conflicted_state, state_map
- )
-
- new_needed_events = set(itervalues(auth_events))
- new_needed_events -= needed_events
- if event_map is not None:
- new_needed_events -= set(iterkeys(event_map))
-
- logger.info("Asking for %d auth events", len(new_needed_events))
-
- state_map_new = yield state_map_factory(new_needed_events)
- state_map.update(state_map_new)
-
- defer.returnValue(_resolve_with_state(
- unconflicted_state, conflicted_state, auth_events, state_map
- ))
-
-
-def _create_auth_events_from_maps(unconflicted_state, conflicted_state, state_map):
- auth_events = {}
- for event_ids in itervalues(conflicted_state):
- for event_id in event_ids:
- if event_id in state_map:
- keys = event_auth.auth_types_for_event(state_map[event_id])
- for key in keys:
- if key not in auth_events:
- event_id = unconflicted_state.get(key, None)
- if event_id:
- auth_events[key] = event_id
- return auth_events
-
-
-def _resolve_with_state(unconflicted_state_ids, conflicted_state_ids, auth_event_ids,
- state_map):
- conflicted_state = {}
- for key, event_ids in iteritems(conflicted_state_ids):
- events = [state_map[ev_id] for ev_id in event_ids if ev_id in state_map]
- if len(events) > 1:
- conflicted_state[key] = events
- elif len(events) == 1:
- unconflicted_state_ids[key] = events[0].event_id
-
- auth_events = {
- key: state_map[ev_id]
- for key, ev_id in iteritems(auth_event_ids)
- if ev_id in state_map
- }
-
- try:
- resolved_state = _resolve_state_events(
- conflicted_state, auth_events
+ if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,):
+ return v1.resolve_events_with_factory(
+ state_sets, event_map, state_map_factory,
+ )
+ else:
+ # This should only happen if we added a version but forgot to add it to
+ # the list above.
+ raise Exception(
+ "No state resolution algorithm defined for version %r" % (room_version,)
)
- except Exception:
- logger.exception("Failed to resolve state")
- raise
-
- new_state = unconflicted_state_ids
- for key, event in iteritems(resolved_state):
- new_state[key] = event.event_id
-
- return new_state
-
-
-def _resolve_state_events(conflicted_state, auth_events):
- """ This is where we actually decide which of the conflicted state to
- use.
-
- We resolve conflicts in the following order:
- 1. power levels
- 2. join rules
- 3. memberships
- 4. other events.
- """
- resolved_state = {}
- if POWER_KEY in conflicted_state:
- events = conflicted_state[POWER_KEY]
- logger.debug("Resolving conflicted power levels %r", events)
- resolved_state[POWER_KEY] = _resolve_auth_events(
- events, auth_events)
-
- auth_events.update(resolved_state)
-
- for key, events in iteritems(conflicted_state):
- if key[0] == EventTypes.JoinRules:
- logger.debug("Resolving conflicted join rules %r", events)
- resolved_state[key] = _resolve_auth_events(
- events,
- auth_events
- )
-
- auth_events.update(resolved_state)
-
- for key, events in iteritems(conflicted_state):
- if key[0] == EventTypes.Member:
- logger.debug("Resolving conflicted member lists %r", events)
- resolved_state[key] = _resolve_auth_events(
- events,
- auth_events
- )
-
- auth_events.update(resolved_state)
-
- for key, events in iteritems(conflicted_state):
- if key not in resolved_state:
- logger.debug("Resolving conflicted state %r:%r", key, events)
- resolved_state[key] = _resolve_normal_events(
- events, auth_events
- )
-
- return resolved_state
-
-
-def _resolve_auth_events(events, auth_events):
- reverse = [i for i in reversed(_ordered_events(events))]
-
- auth_keys = set(
- key
- for event in events
- for key in event_auth.auth_types_for_event(event)
- )
-
- new_auth_events = {}
- for key in auth_keys:
- auth_event = auth_events.get(key, None)
- if auth_event:
- new_auth_events[key] = auth_event
-
- auth_events = new_auth_events
-
- prev_event = reverse[0]
- for event in reverse[1:]:
- auth_events[(prev_event.type, prev_event.state_key)] = prev_event
- try:
- # The signatures have already been checked at this point
- event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
- prev_event = event
- except AuthError:
- return prev_event
-
- return event
-
-
-def _resolve_normal_events(events, auth_events):
- for event in _ordered_events(events):
- try:
- # The signatures have already been checked at this point
- event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
- return event
- except AuthError:
- pass
-
- # Use the last event (the one with the least depth) if they all fail
- # the auth check.
- return event
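
State resolution is now dispatched on the room version, with the existing algorithm moved to synapse/state/v1.py and unknown versions failing loudly instead of silently using the wrong rules. A condensed sketch of that dispatch; only the routing mirrors the change, the resolver body here is a stub:

    # Condensed sketch; the version constants mirror synapse.api.constants.
    class RoomVersions(object):
        V1 = "1"
        VDH_TEST = "vdh-test-version"

    def _v1_resolve(state_sets, state_map):
        # Stand-in for synapse.state.v1.resolve_events_with_state_map.
        return state_sets[0]

    def resolve_events_with_state_map(room_version, state_sets, state_map):
        if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST):
            return _v1_resolve(state_sets, state_map)
        # Only reachable if a version is added to KNOWN_ROOM_VERSIONS but not
        # wired up here; better to fail than to resolve with the wrong rules.
        raise Exception(
            "No state resolution algorithm defined for version %r" % (room_version,)
        )

    assert resolve_events_with_state_map(
        "1", [{("m.room.name", ""): "$ev1"}], {},
    ) == {("m.room.name", ""): "$ev1"}
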
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
new file mode 100644
index 0000000000..3a1f7054a1
--- /dev/null
+++ b/synapse/state/v1.py
@@ -0,0 +1,321 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+import logging
+
+from six import iteritems, iterkeys, itervalues
+
+from twisted.internet import defer
+
+from synapse import event_auth
+from synapse.api.constants import EventTypes
+from synapse.api.errors import AuthError
+
+logger = logging.getLogger(__name__)
+
+
+POWER_KEY = (EventTypes.PowerLevels, "")
+
+
+def resolve_events_with_state_map(state_sets, state_map):
+ """
+ Args:
+ state_sets(list): List of dicts of (type, state_key) -> event_id,
+ which are the different state groups to resolve.
+ state_map(dict): a dict from event_id to event, for all events in
+ state_sets.
+
+ Returns
+ dict[(str, str), str]:
+ a map from (type, state_key) to event_id.
+ """
+ if len(state_sets) == 1:
+ return state_sets[0]
+
+ unconflicted_state, conflicted_state = _seperate(
+ state_sets,
+ )
+
+ auth_events = _create_auth_events_from_maps(
+ unconflicted_state, conflicted_state, state_map
+ )
+
+ return _resolve_with_state(
+ unconflicted_state, conflicted_state, auth_events, state_map
+ )
+
+
+@defer.inlineCallbacks
+def resolve_events_with_factory(state_sets, event_map, state_map_factory):
+ """
+ Args:
+ state_sets(list): List of dicts of (type, state_key) -> event_id,
+ which are the different state groups to resolve.
+
+ event_map(dict[str,FrozenEvent]|None):
+ a dict from event_id to event, for any events that we happen to
+ have in flight (eg, those currently being persisted). This will be
+            used as a starting point for finding the state we need; any missing
+ events will be requested via state_map_factory.
+
+ If None, all events will be fetched via state_map_factory.
+
+ state_map_factory(func): will be called
+ with a list of event_ids that are needed, and should return with
+ a Deferred of dict of event_id to event.
+
+ Returns
+ Deferred[dict[(str, str), str]]:
+ a map from (type, state_key) to event_id.
+ """
+ if len(state_sets) == 1:
+ defer.returnValue(state_sets[0])
+
+ unconflicted_state, conflicted_state = _seperate(
+ state_sets,
+ )
+
+ needed_events = set(
+ event_id
+ for event_ids in itervalues(conflicted_state)
+ for event_id in event_ids
+ )
+ if event_map is not None:
+ needed_events -= set(iterkeys(event_map))
+
+ logger.info("Asking for %d conflicted events", len(needed_events))
+
+ # dict[str, FrozenEvent]: a map from state event id to event. Only includes
+ # the state events which are in conflict (and those in event_map)
+ state_map = yield state_map_factory(needed_events)
+ if event_map is not None:
+ state_map.update(event_map)
+
+ # get the ids of the auth events which allow us to authenticate the
+ # conflicted state, picking only from the unconflicting state.
+ #
+ # dict[(str, str), str]: a map from state key to event id
+ auth_events = _create_auth_events_from_maps(
+ unconflicted_state, conflicted_state, state_map
+ )
+
+ new_needed_events = set(itervalues(auth_events))
+ new_needed_events -= needed_events
+ if event_map is not None:
+ new_needed_events -= set(iterkeys(event_map))
+
+ logger.info("Asking for %d auth events", len(new_needed_events))
+
+ state_map_new = yield state_map_factory(new_needed_events)
+ state_map.update(state_map_new)
+
+ defer.returnValue(_resolve_with_state(
+ unconflicted_state, conflicted_state, auth_events, state_map
+ ))
+
+
+def _seperate(state_sets):
+ """Takes the state_sets and figures out which keys are conflicted and
+ which aren't. i.e., which have multiple different event_ids associated
+ with them in different state sets.
+
+ Args:
+ state_sets(iterable[dict[(str, str), str]]):
+ List of dicts of (type, state_key) -> event_id, which are the
+ different state groups to resolve.
+
+ Returns:
+ (dict[(str, str), str], dict[(str, str), set[str]]):
+ A tuple of (unconflicted_state, conflicted_state), where:
+
+ unconflicted_state is a dict mapping (type, state_key)->event_id
+ for unconflicted state keys.
+
+ conflicted_state is a dict mapping (type, state_key) to a set of
+ event ids for conflicted state keys.
+ """
+ state_set_iterator = iter(state_sets)
+ unconflicted_state = dict(next(state_set_iterator))
+ conflicted_state = {}
+
+ for state_set in state_set_iterator:
+ for key, value in iteritems(state_set):
+ # Check if there is an unconflicted entry for the state key.
+ unconflicted_value = unconflicted_state.get(key)
+ if unconflicted_value is None:
+ # There isn't an unconflicted entry so check if there is a
+ # conflicted entry.
+ ls = conflicted_state.get(key)
+ if ls is None:
+ # There wasn't a conflicted entry so haven't seen this key before.
+ # Therefore it isn't conflicted yet.
+ unconflicted_state[key] = value
+ else:
+ # This key is already conflicted, add our value to the conflict set.
+ ls.add(value)
+ elif unconflicted_value != value:
+ # If the unconflicted value is not the same as our value then we
+ # have a new conflict. So move the key from the unconflicted_state
+ # to the conflicted state.
+ conflicted_state[key] = {value, unconflicted_value}
+ unconflicted_state.pop(key, None)
+
+ return unconflicted_state, conflicted_state
+
+
+def _create_auth_events_from_maps(unconflicted_state, conflicted_state, state_map):
+ auth_events = {}
+ for event_ids in itervalues(conflicted_state):
+ for event_id in event_ids:
+ if event_id in state_map:
+ keys = event_auth.auth_types_for_event(state_map[event_id])
+ for key in keys:
+ if key not in auth_events:
+ event_id = unconflicted_state.get(key, None)
+ if event_id:
+ auth_events[key] = event_id
+ return auth_events
+
+
+def _resolve_with_state(unconflicted_state_ids, conflicted_state_ids, auth_event_ids,
+ state_map):
+ conflicted_state = {}
+ for key, event_ids in iteritems(conflicted_state_ids):
+ events = [state_map[ev_id] for ev_id in event_ids if ev_id in state_map]
+ if len(events) > 1:
+ conflicted_state[key] = events
+ elif len(events) == 1:
+ unconflicted_state_ids[key] = events[0].event_id
+
+ auth_events = {
+ key: state_map[ev_id]
+ for key, ev_id in iteritems(auth_event_ids)
+ if ev_id in state_map
+ }
+
+ try:
+ resolved_state = _resolve_state_events(
+ conflicted_state, auth_events
+ )
+ except Exception:
+ logger.exception("Failed to resolve state")
+ raise
+
+ new_state = unconflicted_state_ids
+ for key, event in iteritems(resolved_state):
+ new_state[key] = event.event_id
+
+ return new_state
+
+
+def _resolve_state_events(conflicted_state, auth_events):
+ """ This is where we actually decide which of the conflicted state to
+ use.
+
+ We resolve conflicts in the following order:
+ 1. power levels
+ 2. join rules
+ 3. memberships
+ 4. other events.
+ """
+ resolved_state = {}
+ if POWER_KEY in conflicted_state:
+ events = conflicted_state[POWER_KEY]
+ logger.debug("Resolving conflicted power levels %r", events)
+ resolved_state[POWER_KEY] = _resolve_auth_events(
+ events, auth_events)
+
+ auth_events.update(resolved_state)
+
+ for key, events in iteritems(conflicted_state):
+ if key[0] == EventTypes.JoinRules:
+ logger.debug("Resolving conflicted join rules %r", events)
+ resolved_state[key] = _resolve_auth_events(
+ events,
+ auth_events
+ )
+
+ auth_events.update(resolved_state)
+
+ for key, events in iteritems(conflicted_state):
+ if key[0] == EventTypes.Member:
+ logger.debug("Resolving conflicted member lists %r", events)
+ resolved_state[key] = _resolve_auth_events(
+ events,
+ auth_events
+ )
+
+ auth_events.update(resolved_state)
+
+ for key, events in iteritems(conflicted_state):
+ if key not in resolved_state:
+ logger.debug("Resolving conflicted state %r:%r", key, events)
+ resolved_state[key] = _resolve_normal_events(
+ events, auth_events
+ )
+
+ return resolved_state
+
+
+def _resolve_auth_events(events, auth_events):
+ reverse = [i for i in reversed(_ordered_events(events))]
+
+ auth_keys = set(
+ key
+ for event in events
+ for key in event_auth.auth_types_for_event(event)
+ )
+
+ new_auth_events = {}
+ for key in auth_keys:
+ auth_event = auth_events.get(key, None)
+ if auth_event:
+ new_auth_events[key] = auth_event
+
+ auth_events = new_auth_events
+
+ prev_event = reverse[0]
+ for event in reverse[1:]:
+ auth_events[(prev_event.type, prev_event.state_key)] = prev_event
+ try:
+ # The signatures have already been checked at this point
+ event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
+ prev_event = event
+ except AuthError:
+ return prev_event
+
+ return event
+
+
+def _resolve_normal_events(events, auth_events):
+ for event in _ordered_events(events):
+ try:
+ # The signatures have already been checked at this point
+ event_auth.check(event, auth_events, do_sig_check=False, do_size_check=False)
+ return event
+ except AuthError:
+ pass
+
+ # Use the last event (the one with the least depth) if they all fail
+ # the auth check.
+ return event
+
+
+def _ordered_events(events):
+ def key_func(e):
+ return -int(e.depth), hashlib.sha1(e.event_id.encode('ascii')).hexdigest()
+
+ return sorted(events, key=key_func)
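
The _seperate helper above (name as in the source) partitions the state sets into keys every set agrees on and keys with conflicting event ids; the rest of the algorithm only has to look at the conflicted map. A compact restatement of that partition with a toy input, spelled conventionally here:

    def separate(state_sets):
        it = iter(state_sets)
        unconflicted = dict(next(it))
        conflicted = {}
        for state_set in it:
            for key, value in state_set.items():
                existing = unconflicted.get(key)
                if existing is None:
                    if key in conflicted:
                        # Already conflicted: just record another candidate.
                        conflicted[key].add(value)
                    else:
                        # First time we see this key.
                        unconflicted[key] = value
                elif existing != value:
                    # New disagreement: move the key into the conflicted map.
                    conflicted[key] = {value, existing}
                    unconflicted.pop(key, None)
        return unconflicted, conflicted

    a = {("m.room.name", ""): "$name1", ("m.room.topic", ""): "$topic"}
    b = {("m.room.name", ""): "$name2", ("m.room.topic", ""): "$topic"}
    unconflicted, conflicted = separate([a, b])
    assert unconflicted == {("m.room.topic", ""): "$topic"}
    assert conflicted == {("m.room.name", ""): {"$name1", "$name2"}}
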
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 025a7fb6d9..f39c8c8461 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -705,9 +705,11 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
}
events_map = {ev.event_id: ev for ev, _ in events_context}
+ room_version = yield self.get_room_version(room_id)
+
logger.debug("calling resolve_state_groups from preserve_events")
res = yield self._state_resolution_handler.resolve_state_groups(
- room_id, state_groups, events_map, get_events
+ room_id, room_version, state_groups, events_map, get_events
)
defer.returnValue((res.state, None))
diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index 06f9a75a97..fd3b630bd2 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -147,6 +147,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
return count
return self.runInteraction("count_users", _count_users)
+ @defer.inlineCallbacks
def upsert_monthly_active_user(self, user_id):
"""
Updates or inserts monthly active user member
@@ -155,7 +156,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
Deferred[bool]: True if a new entry was created, False if an
existing one was updated.
"""
- is_insert = self._simple_upsert(
+ is_insert = yield self._simple_upsert(
desc="upsert_monthly_active_user",
table="monthly_active_users",
keyvalues={
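
The monthly-active-users fix adds @defer.inlineCallbacks and a yield so that anything following the upsert (such as cache invalidation, outside this hunk) only runs once the database write has completed. A minimal Twisted sketch of why the missing yield reorders the work (helper names here are made up for illustration):

    # Illustrative sketch (requires Twisted); helper names are hypothetical.
    from twisted.internet import defer, task

    def slow_db_write(reactor, log):
        # Simulates an asynchronous upsert that completes a moment later.
        return task.deferLater(reactor, 0.01, lambda: log.append("db write done"))

    @defer.inlineCallbacks
    def upsert_with_yield(reactor, log):
        yield slow_db_write(reactor, log)   # wait for the write...
        log.append("cache invalidated")     # ...then do the follow-up work

    def upsert_without_yield(reactor, log):
        slow_db_write(reactor, log)         # fire-and-forget: the bug
        log.append("cache invalidated")     # runs before the write lands

    def main(reactor):
        good, bad = [], []
        upsert_without_yield(reactor, bad)
        d = upsert_with_yield(reactor, good)
        # good ends up ['db write done', 'cache invalidated'];
        # bad ends up ['cache invalidated', 'db write done'].
        d.addCallback(lambda _: print(good, bad))
        return d

    task.react(main)
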
diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py
index 60295da254..88b50f33b5 100644
--- a/synapse/storage/profile.py
+++ b/synapse/storage/profile.py
@@ -71,8 +71,6 @@ class ProfileWorkerStore(SQLBaseStore):
desc="get_from_remote_profile_cache",
)
-
-class ProfileStore(ProfileWorkerStore):
def create_profile(self, user_localpart):
return self._simple_insert(
table="profiles",
@@ -96,6 +94,8 @@ class ProfileStore(ProfileWorkerStore):
desc="set_profile_avatar_url",
)
+
+class ProfileStore(ProfileWorkerStore):
def add_remote_profile_cache(self, user_id, displayname, avatar_url):
"""Ensure we are caching the remote user's profiles.
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index 3378fc77d1..61013b8919 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -186,6 +186,35 @@ class RoomWorkerStore(SQLBaseStore):
desc="is_room_blocked",
)
+ @cachedInlineCallbacks(max_entries=10000)
+ def get_ratelimit_for_user(self, user_id):
+ """Check if there are any overrides for ratelimiting for the given
+ user
+
+ Args:
+ user_id (str)
+
+ Returns:
+ RatelimitOverride if there is an override, else None. If the contents
+            of RatelimitOverride are None or 0 then ratelimiting has been
+ disabled for that user entirely.
+ """
+ row = yield self._simple_select_one(
+ table="ratelimit_override",
+ keyvalues={"user_id": user_id},
+ retcols=("messages_per_second", "burst_count"),
+ allow_none=True,
+ desc="get_ratelimit_for_user",
+ )
+
+ if row:
+ defer.returnValue(RatelimitOverride(
+ messages_per_second=row["messages_per_second"],
+ burst_count=row["burst_count"],
+ ))
+ else:
+ defer.returnValue(None)
+
class RoomStore(RoomWorkerStore, SearchStore):
@@ -469,35 +498,6 @@ class RoomStore(RoomWorkerStore, SearchStore):
"get_all_new_public_rooms", get_all_new_public_rooms
)
- @cachedInlineCallbacks(max_entries=10000)
- def get_ratelimit_for_user(self, user_id):
- """Check if there are any overrides for ratelimiting for the given
- user
-
- Args:
- user_id (str)
-
- Returns:
- RatelimitOverride if there is an override, else None. If the contents
- of RatelimitOverride are None or 0 then ratelimitng has been
- disabled for that user entirely.
- """
- row = yield self._simple_select_one(
- table="ratelimit_override",
- keyvalues={"user_id": user_id},
- retcols=("messages_per_second", "burst_count"),
- allow_none=True,
- desc="get_ratelimit_for_user",
- )
-
- if row:
- defer.returnValue(RatelimitOverride(
- messages_per_second=row["messages_per_second"],
- burst_count=row["burst_count"],
- ))
- else:
- defer.returnValue(None)
-
@defer.inlineCallbacks
def block_room(self, room_id, user_id):
yield self._simple_insert(
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index 022d81ce3e..ed960090c4 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -458,7 +458,7 @@ class AuthTestCase(unittest.TestCase):
with self.assertRaises(ResourceLimitError) as e:
yield self.auth.check_auth_blocking()
self.assertEquals(e.exception.admin_uri, self.hs.config.admin_uri)
- self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED)
+ self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
self.assertEquals(e.exception.code, 403)
# Ensure does not throw an error
@@ -474,5 +474,13 @@ class AuthTestCase(unittest.TestCase):
with self.assertRaises(ResourceLimitError) as e:
yield self.auth.check_auth_blocking()
self.assertEquals(e.exception.admin_uri, self.hs.config.admin_uri)
- self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED)
+ self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
self.assertEquals(e.exception.code, 403)
+
+ @defer.inlineCallbacks
+ def test_server_notices_mxid_special_cased(self):
+ self.hs.config.hs_disabled = True
+ user = "@user:server"
+ self.hs.config.server_notices_mxid = user
+ self.hs.config.hs_disabled_message = "Reason for being disabled"
+ yield self.auth.check_auth_blocking(user)
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index 62dc69003c..80da1c8954 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -20,7 +20,7 @@ from twisted.internet import defer
import synapse.types
from synapse.api.errors import AuthError
-from synapse.handlers.profile import ProfileHandler
+from synapse.handlers.profile import MasterProfileHandler
from synapse.types import UserID
from tests import unittest
@@ -29,7 +29,7 @@ from tests.utils import setup_test_homeserver
class ProfileHandlers(object):
def __init__(self, hs):
- self.profile_handler = ProfileHandler(hs)
+ self.profile_handler = MasterProfileHandler(hs)
class ProfileTestCase(unittest.TestCase):
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index a01ab471f5..31f54bbd7d 100644
--- a/tests/handlers/test_sync.py
+++ b/tests/handlers/test_sync.py
@@ -51,7 +51,7 @@ class SyncTestCase(tests.unittest.TestCase):
self.hs.config.hs_disabled = True
with self.assertRaises(ResourceLimitError) as e:
yield self.sync_handler.wait_for_sync_for_user(sync_config)
- self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED)
+ self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
self.hs.config.hs_disabled = False
@@ -59,7 +59,7 @@ class SyncTestCase(tests.unittest.TestCase):
with self.assertRaises(ResourceLimitError) as e:
yield self.sync_handler.wait_for_sync_for_user(sync_config)
- self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED)
+ self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
def _generate_sync_config(self, user_id):
return SyncConfig(
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index 622be2eef8..2ba80ccdcf 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -112,6 +112,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
@defer.inlineCallbacks
def test_invites(self):
+ yield self.persist(type="m.room.create", key="", creator=USER_ID)
yield self.check("get_invited_rooms_for_user", [USER_ID_2], [])
event = yield self.persist(
type="m.room.member", key=USER_ID_2, membership="invite"
@@ -133,7 +134,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
@defer.inlineCallbacks
def test_push_actions_for_user(self):
- yield self.persist(type="m.room.create", creator=USER_ID)
+ yield self.persist(type="m.room.create", key="", creator=USER_ID)
yield self.persist(type="m.room.join", key=USER_ID, membership="join")
yield self.persist(
type="m.room.join", sender=USER_ID, key=USER_ID_2, membership="join"
diff --git a/tests/server_notices/__init__.py b/tests/server_notices/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/server_notices/__init__.py
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
new file mode 100644
index 0000000000..ca9b31128a
--- /dev/null
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -0,0 +1,146 @@
+from mock import Mock
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, ServerNoticeMsgType
+from synapse.api.errors import ResourceLimitError
+from synapse.handlers.auth import AuthHandler
+from synapse.server_notices.resource_limits_server_notices import (
+ ResourceLimitsServerNotices,
+)
+
+from tests import unittest
+from tests.utils import setup_test_homeserver
+
+
+class AuthHandlers(object):
+ def __init__(self, hs):
+ self.auth_handler = AuthHandler(hs)
+
+
+class TestResourceLimitsServerNotices(unittest.TestCase):
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None)
+ self.hs.handlers = AuthHandlers(self.hs)
+ self.auth_handler = self.hs.handlers.auth_handler
+ self.server_notices_sender = self.hs.get_server_notices_sender()
+
+ # Relying on the list index [1] is far from ideal, but this test is the
+ # only place where the ResourceLimitsServerNotices class needs to be
+ # accessed in isolation; general code should never have a reason to do so.
+ self._rlsn = self.server_notices_sender._server_notices[1]
+ if not isinstance(self._rlsn, ResourceLimitsServerNotices):
+ raise Exception("Failed to find reference to ResourceLimitsServerNotices")
+
+ self._rlsn._store.user_last_seen_monthly_active = Mock(
+ return_value=defer.succeed(1000)
+ )
+ # Replace send_notice with a Mock and keep a reference to it, so the
+ # tests below can assert on whether (and how often) it was called.
+ self._rlsn._server_notices_manager.send_notice = Mock()
+ self._send_notice = self._rlsn._server_notices_manager.send_notice
+ self._rlsn._state.get_current_state = Mock(return_value=defer.succeed(None))
+ self._rlsn._store.get_events = Mock(return_value=defer.succeed({}))
+
+ self.hs.config.limit_usage_by_mau = True
+ self.user_id = "@user_id:test"
+
+ self._rlsn._server_notices_manager.get_notice_room_for_user = Mock(
+ return_value=""
+ )
+ self._rlsn._store.add_tag_to_room = Mock()
+ self.hs.config.admin_uri = "mailto:user@test.com"
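+
+ # The mocks above stub out the storage and server-notices-manager calls
+ # made by maybe_send_server_notice_to_user(), so the tests below only
+ # exercise the decision logic in ResourceLimitsServerNotices itself.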
+
+ @defer.inlineCallbacks
+ def test_maybe_send_server_notice_to_user_flag_off(self):
+ """Tests cases where the flags indicate nothing to do"""
+ # test hs disabled case
+ self.hs.config.hs_disabled = True
+
+ yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+ self._send_notice.assert_not_called()
+ # Test when mau limiting disabled
+ self.hs.config.hs_disabled = False
+ self.hs.config.limit_usage_by_mau = False
+ yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+ self._send_notice.assert_not_called()
+
+ @defer.inlineCallbacks
+ def test_maybe_send_server_notice_to_user_remove_blocked_notice(self):
+ """Test when user has blocked notice, but should have it removed"""
+
+ self._rlsn._auth.check_auth_blocking = Mock()
+ mock_event = Mock(
+ type=EventTypes.Message,
+ content={"msgtype": ServerNoticeMsgType},
+ )
+ self._rlsn._store.get_events = Mock(return_value=defer.succeed(
+ {"123": mock_event}
+ ))
+
+ yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+ # It would be better to check the content, but a single send_notice call
+ # corresponds to removing the blocking event
+ self._send_notice.assert_called_once()
+
+ @defer.inlineCallbacks
+ def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self):
+ """Test when user has blocked notice, but notice ought to be there (NOOP)"""
+ self._rlsn._auth.check_auth_blocking = Mock(
+ side_effect=ResourceLimitError(403, 'foo')
+ )
+
+ mock_event = Mock(
+ type=EventTypes.Message,
+ content={"msgtype": ServerNoticeMsgType},
+ )
+ self._rlsn._store.get_events = Mock(return_value=defer.succeed(
+ {"123": mock_event}
+ ))
+ yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+ self._send_notice.assert_not_called()
+
+ @defer.inlineCallbacks
+ def test_maybe_send_server_notice_to_user_add_blocked_notice(self):
+ """Test when user does not have blocked notice, but should have one"""
+
+ self._rlsn._auth.check_auth_blocking = Mock(
+ side_effect=ResourceLimitError(403, 'foo')
+ )
+ yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+ # It would be better to check the contents, but two send_notice calls
+ # correspond to setting the blocking event
+ self.assertEqual(self._send_notice.call_count, 2)
+
+ @defer.inlineCallbacks
+ def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self):
+ """Test when user does not have blocked notice, nor should they (NOOP)"""
+
+ self._rlsn._auth.check_auth_blocking = Mock()
+
+ yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+ self._send_notice.assert_not_called()
+
+ @defer.inlineCallbacks
+ def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self):
+ """Test when the user is not part of the MAU cohort - this should never
+ happen in practice, but the notice must not be sent if it does
+ """
+
+ self._rlsn._auth.check_auth_blocking = Mock()
+ self._rlsn._store.user_last_seen_monthly_active = Mock(
+ return_value=defer.succeed(None)
+ )
+ yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+
+ self._send_notice.assert_not_called()
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index c4e9fb72bf..02bf975fbf 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -22,7 +22,7 @@ from synapse.api.constants import EventTypes, Membership
from synapse.types import RoomID, UserID
from tests import unittest
-from tests.utils import setup_test_homeserver
+from tests.utils import create_room, setup_test_homeserver
class RedactionTestCase(unittest.TestCase):
@@ -41,6 +41,8 @@ class RedactionTestCase(unittest.TestCase):
self.room1 = RoomID.from_string("!abc123:test")
+ yield create_room(hs, self.room1.to_string(), self.u_alice.to_string())
+
self.depth = 1
@defer.inlineCallbacks
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index c83ef60062..978c66133d 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -22,7 +22,7 @@ from synapse.api.constants import EventTypes, Membership
from synapse.types import RoomID, UserID
from tests import unittest
-from tests.utils import setup_test_homeserver
+from tests.utils import create_room, setup_test_homeserver
class RoomMemberStoreTestCase(unittest.TestCase):
@@ -45,6 +45,8 @@ class RoomMemberStoreTestCase(unittest.TestCase):
self.room = RoomID.from_string("!abc123:test")
+ yield create_room(hs, self.room.to_string(), self.u_alice.to_string())
+
@defer.inlineCallbacks
def inject_room_member(self, room, user, membership, replaces_state=None):
builder = self.event_builder_factory.new(
diff --git a/tests/test_state.py b/tests/test_state.py
index 96fdb8636c..452a123c3a 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -18,7 +18,7 @@ from mock import Mock
from twisted.internet import defer
from synapse.api.auth import Auth
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership, RoomVersions
from synapse.events import FrozenEvent
from synapse.state import StateHandler, StateResolutionHandler
@@ -117,6 +117,9 @@ class StateGroupStore(object):
def register_event_id_state_group(self, event_id, state_group):
self._event_to_state_group[event_id] = state_group
+ def get_room_version(self, room_id):
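+ # the refactored state-resolution code looks the room version up via
+ # the store; this stub always reports a v1 room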
+ return RoomVersions.V1
+
class DictObj(dict):
def __init__(self, **kwargs):
@@ -176,7 +179,9 @@ class StateTestCase(unittest.TestCase):
def test_branch_no_conflict(self):
graph = Graph(
nodes={
- "START": DictObj(type=EventTypes.Create, state_key="", depth=1),
+ "START": DictObj(
+ type=EventTypes.Create, state_key="", content={}, depth=1,
+ ),
"A": DictObj(type=EventTypes.Message, depth=2),
"B": DictObj(type=EventTypes.Message, depth=3),
"C": DictObj(type=EventTypes.Name, state_key="", depth=3),
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index 45a78338d6..8d8ce0cab9 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -21,7 +21,7 @@ from synapse.events import FrozenEvent
from synapse.visibility import filter_events_for_server
import tests.unittest
-from tests.utils import setup_test_homeserver
+from tests.utils import create_room, setup_test_homeserver
logger = logging.getLogger(__name__)
@@ -36,6 +36,8 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
self.event_builder_factory = self.hs.get_event_builder_factory()
self.store = self.hs.get_datastore()
+ yield create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM")
+
@defer.inlineCallbacks
def test_filtering(self):
#
diff --git a/tests/utils.py b/tests/utils.py
index bb0fc74054..9f7ff94575 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -24,6 +24,7 @@ from six.moves.urllib import parse as urlparse
from twisted.internet import defer, reactor
+from synapse.api.constants import EventTypes
from synapse.api.errors import CodeMessageException, cs_error
from synapse.federation.transport import server
from synapse.http.server import HttpServer
@@ -539,3 +540,32 @@ class DeferredMockCallable(object):
"Expected not to received any calls, got:\n"
+ "\n".join(["call(%s)" % _format_call(c[0], c[1]) for c in calls])
)
+
+
+@defer.inlineCallbacks
+def create_room(hs, room_id, creator_id):
+ """Creates and persist a creation event for the given room
+
+ Args:
+ hs
+ room_id (str)
+ creator_id (str)
+ """
+
+ store = hs.get_datastore()
+ event_builder_factory = hs.get_event_builder_factory()
+ event_creation_handler = hs.get_event_creation_handler()
+
+ builder = event_builder_factory.new({
+ "type": EventTypes.Create,
+ "state_key": "",
+ "sender": creator_id,
+ "room_id": room_id,
+ "content": {},
+ })
+
+ event, context = yield event_creation_handler.create_new_client_event(
+ builder
+ )
+
+ yield store.persist_event(event, context)
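+
+ # Minimal usage sketch (illustrative only, IDs made up): storage tests that
+ # persist events into a room are expected to create the room first, e.g.
+ # from an inlineCallbacks setUp:
+ #
+ #     hs = yield setup_test_homeserver(self.addCleanup)
+ #     yield create_room(hs, "!abc123:test", "@alice:test")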