diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
deleted file mode 100644
index 0ccef884e7..0000000000
--- a/synapse/handlers/_base.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright 2014 - 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import TYPE_CHECKING, Optional
-
-from synapse.api.ratelimiting import Ratelimiter
-from synapse.types import Requester
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-class BaseHandler:
- """
- Common base class for the event handlers.
-
- Deprecated: new code should not use this. Instead, Handler classes should define the
- fields they actually need. The utility methods should either be factored out to
- standalone helper functions, or to different Handler classes.
- """
-
- def __init__(self, hs: "HomeServer"):
- self.store = hs.get_datastore()
- self.auth = hs.get_auth()
- self.notifier = hs.get_notifier()
- self.state_handler = hs.get_state_handler()
- self.distributor = hs.get_distributor()
- self.clock = hs.get_clock()
- self.hs = hs
-
- # The rate_hz and burst_count are overridden on a per-user basis
- self.request_ratelimiter = Ratelimiter(
- store=self.store, clock=self.clock, rate_hz=0, burst_count=0
- )
- self._rc_message = self.hs.config.ratelimiting.rc_message
-
- # Check whether ratelimiting room admin message redaction is enabled
- # by the presence of rate limits in the config
- if self.hs.config.ratelimiting.rc_admin_redaction:
- self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter(
- store=self.store,
- clock=self.clock,
- rate_hz=self.hs.config.ratelimiting.rc_admin_redaction.per_second,
- burst_count=self.hs.config.ratelimiting.rc_admin_redaction.burst_count,
- )
- else:
- self.admin_redaction_ratelimiter = None
-
- self.server_name = hs.hostname
-
- self.event_builder_factory = hs.get_event_builder_factory()
-
- async def ratelimit(
- self,
- requester: Requester,
- update: bool = True,
- is_admin_redaction: bool = False,
- ) -> None:
- """Ratelimits requests.
-
- Args:
- requester
- update: Whether to record that a request is being processed.
- Set to False when doing multiple checks for one request (e.g.
- to check up front if we would reject the request), and set to
- True for the last call for a given request.
- is_admin_redaction: Whether this is a room admin/moderator
- redacting an event. If so then we may apply different
- ratelimits depending on config.
-
- Raises:
- LimitExceededError if the request should be ratelimited
- """
- user_id = requester.user.to_string()
-
- # The AS user itself is never rate limited.
- app_service = self.store.get_app_service_by_user_id(user_id)
- if app_service is not None:
- return # do not ratelimit app service senders
-
- messages_per_second = self._rc_message.per_second
- burst_count = self._rc_message.burst_count
-
- # Check if there is a per user override in the DB.
- override = await self.store.get_ratelimit_for_user(user_id)
- if override:
- # If overridden with a null Hz then ratelimiting has been entirely
- # disabled for the user
- if not override.messages_per_second:
- return
-
- messages_per_second = override.messages_per_second
- burst_count = override.burst_count
-
- if is_admin_redaction and self.admin_redaction_ratelimiter:
- # If we have separate config for admin redactions, use a separate
- # ratelimiter as to not have user_ids clash
- await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
- else:
- # Override rate and burst count per-user
- await self.request_ratelimiter.ratelimit(
- requester,
- rate_hz=messages_per_second,
- burst_count=burst_count,
- update=update,
- )
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 5a5f124ddf..87e415df75 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -67,12 +67,8 @@ class AccountValidityHandler:
and self._account_validity_renew_by_email_enabled
):
# Don't do email-specific configuration if renewal by email is disabled.
- self._template_html = (
- hs.config.account_validity.account_validity_template_html
- )
- self._template_text = (
- hs.config.account_validity.account_validity_template_text
- )
+ self._template_html = hs.config.email.account_validity_template_html
+ self._template_text = hs.config.email.account_validity_template_text
self._renew_email_subject = (
hs.config.account_validity.account_validity_renew_email_subject
)
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index bfa7f2c545..a53cd62d3c 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -21,18 +21,15 @@ from synapse.events import EventBase
from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID
from synapse.visibility import filter_events_for_client
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-class AdminHandler(BaseHandler):
+class AdminHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.store = hs.get_datastore()
self.storage = hs.get_storage()
self.state_store = self.storage.state
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index a8c717efd5..f4612a5b92 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -52,7 +52,6 @@ from synapse.api.errors import (
UserDeactivatedError,
)
from synapse.api.ratelimiting import Ratelimiter
-from synapse.handlers._base import BaseHandler
from synapse.handlers.ui_auth import (
INTERACTIVE_AUTH_CHECKERS,
UIAuthSessionDataConstants,
@@ -186,19 +185,20 @@ class LoginTokenAttributes:
auth_provider_id = attr.ib(type=str)
-class AuthHandler(BaseHandler):
+class AuthHandler:
SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
+ self.clock = hs.get_clock()
self.checkers: Dict[str, UserInteractiveAuthChecker] = {}
for auth_checker_class in INTERACTIVE_AUTH_CHECKERS:
inst = auth_checker_class(hs)
if inst.is_enabled():
self.checkers[inst.AUTH_TYPE] = inst # type: ignore
- self.bcrypt_rounds = hs.config.bcrypt_rounds
+ self.bcrypt_rounds = hs.config.registration.bcrypt_rounds
# we can't use hs.get_module_api() here, because to do so will create an
# import loop.
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 9ae5b7750e..e88c3c27ce 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -19,19 +19,17 @@ from synapse.api.errors import SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import Requester, UserID, create_requester
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-class DeactivateAccountHandler(BaseHandler):
+class DeactivateAccountHandler:
"""Handler which deals with deactivating user accounts."""
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
self.hs = hs
self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler()
@@ -133,6 +131,10 @@ class DeactivateAccountHandler(BaseHandler):
# delete from user directory
await self.user_directory_handler.handle_local_user_deactivated(user_id)
+ # If the user is present in the monthly active users table
+ # remove them
+ await self.store.remove_deactivated_user_from_mau_table(user_id)
+
# Mark the user as erased, if they asked for that
if erase_data:
user = UserID.from_string(user_id)
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 35334725d7..75e6019760 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -40,8 +40,6 @@ from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import measure_func
from synapse.util.retryutils import NotRetryingDestination
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -50,14 +48,16 @@ logger = logging.getLogger(__name__)
MAX_DEVICE_DISPLAY_NAME_LEN = 100
-class DeviceWorkerHandler(BaseHandler):
+class DeviceWorkerHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.clock = hs.get_clock()
self.hs = hs
+ self.store = hs.get_datastore()
+ self.notifier = hs.get_notifier()
self.state = hs.get_state_handler()
self.state_store = hs.get_storage().state
self._auth_handler = hs.get_auth_handler()
+ self.server_name = hs.hostname
@trace
async def get_devices_by_user(self, user_id: str) -> List[JsonDict]:
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 5cfba3c817..14ed7d9879 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -31,26 +31,25 @@ from synapse.appservice import ApplicationService
from synapse.storage.databases.main.directory import RoomAliasMapping
from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-class DirectoryHandler(BaseHandler):
+class DirectoryHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.auth = hs.get_auth()
+ self.hs = hs
self.state = hs.get_state_handler()
self.appservice_handler = hs.get_application_service_handler()
self.event_creation_handler = hs.get_event_creation_handler()
self.store = hs.get_datastore()
self.config = hs.config
self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
- self.require_membership = hs.config.require_membership_for_aliases
+ self.require_membership = hs.config.server.require_membership_for_aliases
self.third_party_event_rules = hs.get_third_party_event_rules()
+ self.server_name = hs.hostname
self.federation = hs.get_federation_client()
hs.get_federation_registry().register_query_handler(
diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py
index cb81fa0986..d089c56286 100644
--- a/synapse/handlers/event_auth.py
+++ b/synapse/handlers/event_auth.py
@@ -22,7 +22,8 @@ from synapse.api.constants import (
RestrictedJoinRuleTypes,
)
from synapse.api.errors import AuthError, Codes, SynapseError
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
+from synapse.api.room_versions import RoomVersion
+from synapse.event_auth import check_auth_rules_for_event
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext
@@ -45,21 +46,17 @@ class EventAuthHandler:
self._store = hs.get_datastore()
self._server_name = hs.hostname
- async def check_from_context(
+ async def check_auth_rules_from_context(
self,
- room_version: str,
+ room_version_obj: RoomVersion,
event: EventBase,
context: EventContext,
- do_sig_check: bool = True,
) -> None:
+ """Check an event passes the auth rules at its own auth events"""
auth_event_ids = event.auth_event_ids()
auth_events_by_id = await self._store.get_events(auth_event_ids)
auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()}
-
- room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
- event_auth.check(
- room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
- )
+ check_auth_rules_for_event(room_version_obj, event, auth_events)
def compute_auth_events(
self,
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 4b3f037072..1f64534a8a 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -25,8 +25,6 @@ from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, UserID
from synapse.visibility import filter_events_for_client
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -34,11 +32,11 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class EventStreamHandler(BaseHandler):
+class EventStreamHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.store = hs.get_datastore()
self.clock = hs.get_clock()
+ self.hs = hs
self.notifier = hs.get_notifier()
self.state = hs.get_state_handler()
@@ -138,9 +136,9 @@ class EventStreamHandler(BaseHandler):
return chunk
-class EventHandler(BaseHandler):
+class EventHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
self.storage = hs.get_storage()
async def get_event(
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index adbd150e46..3e341bd287 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -45,11 +45,14 @@ from synapse.api.errors import (
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
from synapse.crypto.event_signing import compute_event_signature
+from synapse.event_auth import (
+ check_auth_rules_for_event,
+ validate_event_for_room_version,
+)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.validator import EventValidator
from synapse.federation.federation_client import InvalidResponseError
-from synapse.handlers._base import BaseHandler
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
make_deferred_yieldable,
@@ -74,15 +77,13 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class FederationHandler(BaseHandler):
+class FederationHandler:
"""Handles general incoming federation requests
Incoming events are *not* handled here, for which see FederationEventHandler.
"""
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
self.hs = hs
self.store = hs.get_datastore()
@@ -95,6 +96,7 @@ class FederationHandler(BaseHandler):
self.is_mine_id = hs.is_mine_id
self.spam_checker = hs.get_spam_checker()
self.event_creation_handler = hs.get_event_creation_handler()
+ self.event_builder_factory = hs.get_event_builder_factory()
self._event_auth_handler = hs.get_event_auth_handler()
self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
self.config = hs.config
@@ -723,8 +725,8 @@ class FederationHandler(BaseHandler):
state_ids,
)
- builder = self.event_builder_factory.new(
- room_version.identifier,
+ builder = self.event_builder_factory.for_room_version(
+ room_version,
{
"type": EventTypes.Member,
"content": event_content,
@@ -747,10 +749,9 @@ class FederationHandler(BaseHandler):
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
- await self._event_auth_handler.check_from_context(
- room_version.identifier, event, context, do_sig_check=False
+ await self._event_auth_handler.check_auth_rules_from_context(
+ room_version, event, context
)
-
return event
async def on_invite_request(
@@ -767,7 +768,7 @@ class FederationHandler(BaseHandler):
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")
- if self.hs.config.block_non_admin_invites:
+ if self.hs.config.server.block_non_admin_invites:
raise SynapseError(403, "This server does not accept room invites")
if not await self.spam_checker.user_may_invite(
@@ -902,9 +903,9 @@ class FederationHandler(BaseHandler):
)
raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
- room_version = await self.store.get_room_version_id(room_id)
- builder = self.event_builder_factory.new(
- room_version,
+ room_version_obj = await self.store.get_room_version(room_id)
+ builder = self.event_builder_factory.for_room_version(
+ room_version_obj,
{
"type": EventTypes.Member,
"content": {"membership": Membership.LEAVE},
@@ -921,8 +922,8 @@ class FederationHandler(BaseHandler):
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_leave_request`
- await self._event_auth_handler.check_from_context(
- room_version, event, context, do_sig_check=False
+ await self._event_auth_handler.check_auth_rules_from_context(
+ room_version_obj, event, context
)
except AuthError as e:
logger.warning("Failed to create new leave %r because %s", event, e)
@@ -954,10 +955,10 @@ class FederationHandler(BaseHandler):
)
raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
- room_version = await self.store.get_room_version_id(room_id)
+ room_version_obj = await self.store.get_room_version(room_id)
- builder = self.event_builder_factory.new(
- room_version,
+ builder = self.event_builder_factory.for_room_version(
+ room_version_obj,
{
"type": EventTypes.Member,
"content": {"membership": Membership.KNOCK},
@@ -983,8 +984,8 @@ class FederationHandler(BaseHandler):
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_knock_request`
- await self._event_auth_handler.check_from_context(
- room_version, event, context, do_sig_check=False
+ await self._event_auth_handler.check_auth_rules_from_context(
+ room_version_obj, event, context
)
except AuthError as e:
logger.warning("Failed to create new knock %r because %s", event, e)
@@ -1173,7 +1174,8 @@ class FederationHandler(BaseHandler):
auth_for_e[(EventTypes.Create, "")] = create_event
try:
- event_auth.check(room_version, e, auth_events=auth_for_e)
+ validate_event_for_room_version(room_version, e)
+ check_auth_rules_for_event(room_version, e, auth_for_e)
except SynapseError as err:
# we may get SynapseErrors here as well as AuthErrors. For
# instance, there are a couple of (ancient) events in some
@@ -1250,8 +1252,10 @@ class FederationHandler(BaseHandler):
}
if await self._event_auth_handler.check_host_in_room(room_id, self.hs.hostname):
- room_version = await self.store.get_room_version_id(room_id)
- builder = self.event_builder_factory.new(room_version, event_dict)
+ room_version_obj = await self.store.get_room_version(room_id)
+ builder = self.event_builder_factory.for_room_version(
+ room_version_obj, event_dict
+ )
EventValidator().validate_builder(builder)
event, context = await self.event_creation_handler.create_new_client_event(
@@ -1259,7 +1263,7 @@ class FederationHandler(BaseHandler):
)
event, context = await self.add_display_name_to_third_party_invite(
- room_version, event_dict, event, context
+ room_version_obj, event_dict, event, context
)
EventValidator().validate_new(event, self.config)
@@ -1269,8 +1273,9 @@ class FederationHandler(BaseHandler):
event.internal_metadata.send_on_behalf_of = self.hs.hostname
try:
- await self._event_auth_handler.check_from_context(
- room_version, event, context
+ validate_event_for_room_version(room_version_obj, event)
+ await self._event_auth_handler.check_auth_rules_from_context(
+ room_version_obj, event, context
)
except AuthError as e:
logger.warning("Denying new third party invite %r because %s", event, e)
@@ -1304,22 +1309,25 @@ class FederationHandler(BaseHandler):
"""
assert_params_in_dict(event_dict, ["room_id"])
- room_version = await self.store.get_room_version_id(event_dict["room_id"])
+ room_version_obj = await self.store.get_room_version(event_dict["room_id"])
# NB: event_dict has a particular specced format we might need to fudge
# if we change event formats too much.
- builder = self.event_builder_factory.new(room_version, event_dict)
+ builder = self.event_builder_factory.for_room_version(
+ room_version_obj, event_dict
+ )
event, context = await self.event_creation_handler.create_new_client_event(
builder=builder
)
event, context = await self.add_display_name_to_third_party_invite(
- room_version, event_dict, event, context
+ room_version_obj, event_dict, event, context
)
try:
- await self._event_auth_handler.check_from_context(
- room_version, event, context
+ validate_event_for_room_version(room_version_obj, event)
+ await self._event_auth_handler.check_auth_rules_from_context(
+ room_version_obj, event, context
)
except AuthError as e:
logger.warning("Denying third party invite %r because %s", event, e)
@@ -1336,7 +1344,7 @@ class FederationHandler(BaseHandler):
async def add_display_name_to_third_party_invite(
self,
- room_version: str,
+ room_version_obj: RoomVersion,
event_dict: JsonDict,
event: EventBase,
context: EventContext,
@@ -1368,7 +1376,9 @@ class FederationHandler(BaseHandler):
# auth checks. If we need the invite and don't have it then the
# auth check code will explode appropriately.
- builder = self.event_builder_factory.new(room_version, event_dict)
+ builder = self.event_builder_factory.for_room_version(
+ room_version_obj, event_dict
+ )
EventValidator().validate_builder(builder)
event, context = await self.event_creation_handler.create_new_client_event(
builder=builder
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 01fd841122..f640b417b3 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -29,7 +29,6 @@ from typing import (
from prometheus_client import Counter
-from synapse import event_auth
from synapse.api.constants import (
EventContentFields,
EventTypes,
@@ -47,7 +46,11 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
-from synapse.event_auth import auth_types_for_event
+from synapse.event_auth import (
+ auth_types_for_event,
+ check_auth_rules_for_event,
+ validate_event_for_room_version,
+)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_client import InvalidResponseError
@@ -68,11 +71,7 @@ from synapse.types import (
UserID,
get_domain_from_id,
)
-from synapse.util.async_helpers import (
- Linearizer,
- concurrently_execute,
- yieldable_gather_results,
-)
+from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.iterutils import batch_iter
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import shortstr
@@ -357,6 +356,11 @@ class FederationEventHandler:
)
# all looks good, we can persist the event.
+
+ # First, precalculate the joined hosts so that the federation sender doesn't
+ # need to.
+ await self._event_creation_handler.cache_joined_hosts_for_event(event, context)
+
await self._run_push_actions_and_persist_event(event, context)
return event, context
@@ -890,6 +894,9 @@ class FederationEventHandler:
backfilled=backfilled,
)
except AuthError as e:
+ # FIXME richvdh 2021/10/07 I don't think this is reachable. Let's log it
+ # for now
+ logger.exception("Unexpected AuthError from _check_event_auth")
raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
await self._run_push_actions_and_persist_event(event, context, backfilled)
@@ -1011,9 +1018,8 @@ class FederationEventHandler:
room_version = await self._store.get_room_version(marker_event.room_id)
create_event = await self._store.get_create_event_for_room(marker_event.room_id)
room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
- if (
- not room_version.msc2716_historical
- or not self._config.experimental.msc2716_enabled
+ if not room_version.msc2716_historical and (
+ not self._config.experimental.msc2716_enabled
or marker_event.sender != room_creator
):
return
@@ -1155,7 +1161,10 @@ class FederationEventHandler:
return
logger.info(
- "Persisting %i of %i remaining events", len(roots), len(event_map)
+ "Persisting %i of %i remaining outliers: %s",
+ len(roots),
+ len(event_map),
+ shortstr(e.event_id for e in roots),
)
await self._auth_and_persist_fetched_events_inner(origin, room_id, roots)
@@ -1189,7 +1198,10 @@ class FederationEventHandler:
allow_rejected=True,
)
- async def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]:
+ room_version = await self._store.get_room_version_id(room_id)
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+
+ def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]:
with nested_logging_context(suffix=event.event_id):
auth = {}
for auth_event_id in event.auth_event_ids():
@@ -1207,17 +1219,16 @@ class FederationEventHandler:
auth[(ae.type, ae.state_key)] = ae
context = EventContext.for_outlier()
- context = await self._check_event_auth(
- origin,
- event,
- context,
- claimed_auth_event_map=auth,
- )
+ try:
+ validate_event_for_room_version(room_version_obj, event)
+ check_auth_rules_for_event(room_version_obj, event, auth)
+ except AuthError as e:
+ logger.warning("Rejecting %r because %s", event, e)
+ context.rejected = RejectedReason.AUTH_ERROR
+
return event, context
- events_to_persist = (
- x for x in await yieldable_gather_results(prep, fetched_events) if x
- )
+ events_to_persist = (x for x in (prep(event) for event in fetched_events) if x)
await self.persist_events_and_notify(room_id, tuple(events_to_persist))
async def _check_event_auth(
@@ -1226,7 +1237,6 @@ class FederationEventHandler:
event: EventBase,
context: EventContext,
state: Optional[Iterable[EventBase]] = None,
- claimed_auth_event_map: Optional[StateMap[EventBase]] = None,
backfilled: bool = False,
) -> EventContext:
"""
@@ -1242,42 +1252,45 @@ class FederationEventHandler:
The state events used to check the event for soft-fail. If this is
not provided the current state events will be used.
- claimed_auth_event_map:
- A map of (type, state_key) => event for the event's claimed auth_events.
- Possibly including events that were rejected, or are in the wrong room.
-
- Only populated when populating outliers.
-
backfilled: True if the event was backfilled.
Returns:
The updated context object.
"""
- # claimed_auth_event_map should be given iff the event is an outlier
- assert bool(claimed_auth_event_map) == event.internal_metadata.outlier
+ # This method should only be used for non-outliers
+ assert not event.internal_metadata.outlier
+ # first of all, check that the event itself is valid.
room_version = await self._store.get_room_version_id(event.room_id)
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
- if claimed_auth_event_map:
- # if we have a copy of the auth events from the event, use that as the
- # basis for auth.
- auth_events = claimed_auth_event_map
- else:
- # otherwise, we calculate what the auth events *should* be, and use that
- prev_state_ids = await context.get_prev_state_ids()
- auth_events_ids = self._event_auth_handler.compute_auth_events(
- event, prev_state_ids, for_verification=True
- )
- auth_events_x = await self._store.get_events(auth_events_ids)
- auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()}
+ try:
+ validate_event_for_room_version(room_version_obj, event)
+ except AuthError as e:
+ logger.warning("While validating received event %r: %s", event, e)
+ # TODO: use a different rejected reason here?
+ context.rejected = RejectedReason.AUTH_ERROR
+ return context
+
+ # calculate what the auth events *should* be, to use as a basis for auth.
+ prev_state_ids = await context.get_prev_state_ids()
+ auth_events_ids = self._event_auth_handler.compute_auth_events(
+ event, prev_state_ids, for_verification=True
+ )
+ auth_events_x = await self._store.get_events(auth_events_ids)
+ calculated_auth_event_map = {
+ (e.type, e.state_key): e for e in auth_events_x.values()
+ }
try:
(
context,
auth_events_for_auth,
) = await self._update_auth_events_and_context_for_auth(
- origin, event, context, auth_events
+ origin,
+ event,
+ context,
+ calculated_auth_event_map=calculated_auth_event_map,
)
except Exception:
# We don't really mind if the above fails, so lets not fail
@@ -1289,24 +1302,17 @@ class FederationEventHandler:
"Ignoring failure and continuing processing of event.",
event.event_id,
)
- auth_events_for_auth = auth_events
+ auth_events_for_auth = calculated_auth_event_map
try:
- event_auth.check(room_version_obj, event, auth_events=auth_events_for_auth)
+ check_auth_rules_for_event(room_version_obj, event, auth_events_for_auth)
except AuthError as e:
logger.warning("Failed auth resolution for %r because %s", event, e)
context.rejected = RejectedReason.AUTH_ERROR
+ return context
- if not context.rejected:
- await self._check_for_soft_fail(event, state, backfilled, origin=origin)
- await self._maybe_kick_guest_users(event)
-
- # If we are going to send this event over federation we precaclculate
- # the joined hosts.
- if event.internal_metadata.get_send_on_behalf_of():
- await self._event_creation_handler.cache_joined_hosts_for_event(
- event, context
- )
+ await self._check_for_soft_fail(event, state, backfilled, origin=origin)
+ await self._maybe_kick_guest_users(event)
return context
@@ -1404,7 +1410,7 @@ class FederationEventHandler:
}
try:
- event_auth.check(room_version_obj, event, auth_events=current_auth_events)
+ check_auth_rules_for_event(room_version_obj, event, current_auth_events)
except AuthError as e:
logger.warning(
"Soft-failing %r (from %s) because %s",
@@ -1425,7 +1431,7 @@ class FederationEventHandler:
origin: str,
event: EventBase,
context: EventContext,
- input_auth_events: StateMap[EventBase],
+ calculated_auth_event_map: StateMap[EventBase],
) -> Tuple[EventContext, StateMap[EventBase]]:
"""Helper for _check_event_auth. See there for docs.
@@ -1443,19 +1449,17 @@ class FederationEventHandler:
event:
context:
- input_auth_events:
- Map from (event_type, state_key) to event
-
- Normally, our calculated auth_events based on the state of the room
- at the event's position in the DAG, though occasionally (eg if the
- event is an outlier), may be the auth events claimed by the remote
- server.
+ calculated_auth_event_map:
+ Our calculated auth_events based on the state of the room
+ at the event's position in the DAG.
Returns:
updated context, updated auth event map
"""
- # take a copy of input_auth_events before we modify it.
- auth_events: MutableStateMap[EventBase] = dict(input_auth_events)
+ assert not event.internal_metadata.outlier
+
+ # take a copy of calculated_auth_event_map before we modify it.
+ auth_events: MutableStateMap[EventBase] = dict(calculated_auth_event_map)
event_auth_events = set(event.auth_event_ids())
@@ -1475,6 +1479,11 @@ class FederationEventHandler:
logger.debug("Events %s are in the store", have_events)
missing_auth.difference_update(have_events)
+ # missing_auth is now the set of event_ids which:
+ # a. are listed in event.auth_events, *and*
+ # b. are *not* part of our calculated auth events based on room state, *and*
+ # c. are *not* yet in our database.
+
if missing_auth:
# If we don't have all the auth events, we need to get them.
logger.info("auth_events contains unknown events: %s", missing_auth)
@@ -1496,19 +1505,31 @@ class FederationEventHandler:
}
)
- if event.internal_metadata.is_outlier():
- # XXX: given that, for an outlier, we'll be working with the
- # event's *claimed* auth events rather than those we calculated:
- # (a) is there any point in this test, since different_auth below will
- # obviously be empty
- # (b) alternatively, why don't we do it earlier?
- logger.info("Skipping auth_event fetch for outlier")
- return context, auth_events
+ # auth_events now contains
+ # 1. our *calculated* auth events based on the room state, plus:
+ # 2. any events which:
+ # a. are listed in `event.auth_events`, *and*
+ # b. are not part of our calculated auth events, *and*
+ # c. were not in our database before the call to /event_auth
+ # d. have since been added to our database (most likely by /event_auth).
different_auth = event_auth_events.difference(
e.event_id for e in auth_events.values()
)
+ # different_auth is the set of events which *are* in `event.auth_events`, but
+ # which are *not* in `auth_events`. Comparing with (2.) above, this means
+ # exclusively the set of `event.auth_events` which we already had in our
+ # database before any call to /event_auth.
+ #
+ # I'm reasonably sure that the fact that events returned by /event_auth are
+ # blindly added to auth_events (and hence excluded from different_auth) is a bug
+ # - though it's a very long-standing one (see
+ # https://github.com/matrix-org/synapse/commit/78015948a7febb18e000651f72f8f58830a55b93#diff-0bc92da3d703202f5b9be2d3f845e375f5b1a6bc6ba61705a8af9be1121f5e42R786
+ # from Jan 2015 which seems to add it, though it actually just moves it from
+ # elsewhere (before that, it gets lost in a mess of huge "various bug fixes"
+ # PRs).
+
if not different_auth:
return context, auth_events
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index fe8a995892..9c319b5383 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -39,8 +39,6 @@ from synapse.util.stringutils import (
valid_id_server_location,
)
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -49,15 +47,14 @@ logger = logging.getLogger(__name__)
id_server_scheme = "https://"
-class IdentityHandler(BaseHandler):
+class IdentityHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.store = hs.get_datastore()
# An HTTP client for contacting trusted URLs.
self.http_client = SimpleHttpClient(hs)
# An HTTP client for contacting identity servers specified by clients.
self.blacklisting_http_client = SimpleHttpClient(
- hs, ip_blacklist=hs.config.federation_ip_range_blacklist
+ hs, ip_blacklist=hs.config.server.federation_ip_range_blacklist
)
self.federation_http_client = hs.get_federation_http_client()
self.hs = hs
@@ -573,9 +570,15 @@ class IdentityHandler(BaseHandler):
# Try to validate as email
if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ # Remote emails will only be used if a valid identity server is provided.
+ assert (
+ self.hs.config.registration.account_threepid_delegate_email is not None
+ )
+
# Ask our delegated email identity server
validation_session = await self.threepid_from_creds(
- self.hs.config.account_threepid_delegate_email, threepid_creds
+ self.hs.config.registration.account_threepid_delegate_email,
+ threepid_creds,
)
elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
# Get a validated session matching these details
@@ -587,10 +590,11 @@ class IdentityHandler(BaseHandler):
return validation_session
# Try to validate as msisdn
- if self.hs.config.account_threepid_delegate_msisdn:
+ if self.hs.config.registration.account_threepid_delegate_msisdn:
# Ask our delegated msisdn identity server
validation_session = await self.threepid_from_creds(
- self.hs.config.account_threepid_delegate_msisdn, threepid_creds
+ self.hs.config.registration.account_threepid_delegate_msisdn,
+ threepid_creds,
)
return validation_session
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 9ad39a65d8..d4e4556155 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -31,8 +31,6 @@ from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
from synapse.visibility import filter_events_for_client
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -40,9 +38,11 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class InitialSyncHandler(BaseHandler):
+class InitialSyncHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
+ self.state_handler = hs.get_state_handler()
self.hs = hs
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 6ee21e8eed..42a1131b83 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -16,6 +16,7 @@
# limitations under the License.
import logging
import random
+from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple
from canonicaljson import encode_canonical_json
@@ -39,9 +40,11 @@ from synapse.api.errors import (
NotFoundError,
ShadowBanError,
SynapseError,
+ UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.api.urls import ConsentURIBuilder
+from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext
@@ -59,8 +62,6 @@ from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import measure_func
from synapse.visibility import filter_events_for_client
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.events.third_party_rules import ThirdPartyEventRules
from synapse.server import HomeServer
@@ -79,7 +80,7 @@ class MessageHandler:
self.storage = hs.get_storage()
self.state_store = self.storage.state
self._event_serializer = hs.get_event_client_serializer()
- self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages
+ self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages
# The scheduled call to self._expire_event. None if no call is currently
# scheduled.
@@ -413,7 +414,9 @@ class EventCreationHandler:
self.server_name = hs.hostname
self.notifier = hs.get_notifier()
self.config = hs.config
- self.require_membership_for_aliases = hs.config.require_membership_for_aliases
+ self.require_membership_for_aliases = (
+ hs.config.server.require_membership_for_aliases
+ )
self._events_shard_config = self.config.worker.events_shard_config
self._instance_name = hs.get_instance_name()
@@ -423,13 +426,12 @@ class EventCreationHandler:
Membership.JOIN,
Membership.KNOCK,
}
- if self.hs.config.include_profile_data_on_invite:
+ if self.hs.config.server.include_profile_data_on_invite:
self.membership_types_to_include_profile_data_in.add(Membership.INVITE)
self.send_event = ReplicationSendEventRestServlet.make_client(hs)
- # This is only used to get at ratelimit function
- self.base_handler = BaseHandler(hs)
+ self.request_ratelimiter = hs.get_request_ratelimiter()
# We arbitrarily limit concurrent event creation for a room to 5.
# This is to stop us from diverging history *too* much.
@@ -459,11 +461,11 @@ class EventCreationHandler:
#
self._rooms_to_exclude_from_dummy_event_insertion: Dict[str, int] = {}
# The number of forward extremeities before a dummy event is sent.
- self._dummy_events_threshold = hs.config.dummy_events_threshold
+ self._dummy_events_threshold = hs.config.server.dummy_events_threshold
if (
self.config.worker.run_background_tasks
- and self.config.cleanup_extremities_with_dummy_events
+ and self.config.server.cleanup_extremities_with_dummy_events
):
self.clock.looping_call(
lambda: run_as_background_process(
@@ -475,7 +477,7 @@ class EventCreationHandler:
self._message_handler = hs.get_message_handler()
- self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages
+ self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages
self._external_cache = hs.get_external_cache()
@@ -549,16 +551,22 @@ class EventCreationHandler:
await self.auth.check_auth_blocking(requester=requester)
if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
- room_version = event_dict["content"]["room_version"]
+ room_version_id = event_dict["content"]["room_version"]
+ room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
+ if not room_version_obj:
+ # this can happen if support is withdrawn for a room version
+ raise UnsupportedRoomVersionError(room_version_id)
else:
try:
- room_version = await self.store.get_room_version_id(
+ room_version_obj = await self.store.get_room_version(
event_dict["room_id"]
)
except NotFoundError:
raise AuthError(403, "Unknown room")
- builder = self.event_builder_factory.new(room_version, event_dict)
+ builder = self.event_builder_factory.for_room_version(
+ room_version_obj, event_dict
+ )
self.validator.validate_builder(builder)
@@ -1064,9 +1072,17 @@ class EventCreationHandler:
EventTypes.Create,
"",
):
- room_version = event.content.get("room_version", RoomVersions.V1.identifier)
+ room_version_id = event.content.get(
+ "room_version", RoomVersions.V1.identifier
+ )
+ room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
+ if not room_version_obj:
+ raise UnsupportedRoomVersionError(
+ "Attempt to create a room with unsupported room version %s"
+ % (room_version_id,)
+ )
else:
- room_version = await self.store.get_room_version_id(event.room_id)
+ room_version_obj = await self.store.get_room_version(event.room_id)
if event.internal_metadata.is_out_of_band_membership():
# the only sort of out-of-band-membership events we expect to see here are
@@ -1075,8 +1091,9 @@ class EventCreationHandler:
assert event.content["membership"] == Membership.LEAVE
else:
try:
- await self._event_auth_handler.check_from_context(
- room_version, event, context
+ validate_event_for_room_version(room_version_obj, event)
+ await self._event_auth_handler.check_auth_rules_from_context(
+ room_version_obj, event, context
)
except AuthError as err:
logger.warning("Denying new event %r because %s", event, err)
@@ -1302,7 +1319,7 @@ class EventCreationHandler:
original_event and event.sender != original_event.sender
)
- await self.base_handler.ratelimit(
+ await self.request_ratelimiter.ratelimit(
requester, is_admin_redaction=is_admin_redaction
)
@@ -1456,6 +1473,39 @@ class EventCreationHandler:
if prev_state_ids:
raise AuthError(403, "Changing the room create event is forbidden")
+ if event.type == EventTypes.MSC2716_INSERTION:
+ room_version = await self.store.get_room_version_id(event.room_id)
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+
+ create_event = await self.store.get_create_event_for_room(event.room_id)
+ room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
+
+ # Only check an insertion event if the room version
+ # supports it or the event is from the room creator.
+ if room_version_obj.msc2716_historical or (
+ self.config.experimental.msc2716_enabled
+ and event.sender == room_creator
+ ):
+ next_batch_id = event.content.get(
+ EventContentFields.MSC2716_NEXT_BATCH_ID
+ )
+ conflicting_insertion_event_id = (
+ await self.store.get_insertion_event_by_batch_id(
+ event.room_id, next_batch_id
+ )
+ )
+ if conflicting_insertion_event_id is not None:
+ # The current insertion event that we're processing is invalid
+ # because an insertion event already exists in the room with the
+ # same next_batch_id. We can't allow multiple because the batch
+ # pointing will get weird, e.g. we can't determine which insertion
+ # event the batch event is pointing to.
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "Another insertion event already exists with the same next_batch_id",
+ errcode=Codes.INVALID_PARAM,
+ )
+
# Mark any `m.historical` messages as backfilled so they don't appear
# in `/sync` and have the proper decrementing `stream_ordering` as we import
backfilled = False
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 08b93b3ec1..176e4dfdd4 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -85,23 +85,29 @@ class PaginationHandler:
self._purges_by_id: Dict[str, PurgeStatus] = {}
self._event_serializer = hs.get_event_client_serializer()
- self._retention_default_max_lifetime = hs.config.retention_default_max_lifetime
+ self._retention_default_max_lifetime = (
+ hs.config.server.retention_default_max_lifetime
+ )
- self._retention_allowed_lifetime_min = hs.config.retention_allowed_lifetime_min
- self._retention_allowed_lifetime_max = hs.config.retention_allowed_lifetime_max
+ self._retention_allowed_lifetime_min = (
+ hs.config.server.retention_allowed_lifetime_min
+ )
+ self._retention_allowed_lifetime_max = (
+ hs.config.server.retention_allowed_lifetime_max
+ )
- if hs.config.worker.run_background_tasks and hs.config.retention_enabled:
+ if hs.config.worker.run_background_tasks and hs.config.server.retention_enabled:
# Run the purge jobs described in the configuration file.
- for job in hs.config.retention_purge_jobs:
+ for job in hs.config.server.retention_purge_jobs:
logger.info("Setting up purge job with config: %s", job)
self.clock.looping_call(
run_as_background_process,
- job["interval"],
+ job.interval,
"purge_history_for_rooms_in_range",
self.purge_history_for_rooms_in_range,
- job["shortest_max_lifetime"],
- job["longest_max_lifetime"],
+ job.shortest_max_lifetime,
+ job.longest_max_lifetime,
)
async def purge_history_for_rooms_in_range(
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index b23a1541bc..e6c3cf585b 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -32,8 +32,6 @@ from synapse.types import (
get_domain_from_id,
)
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -43,7 +41,7 @@ MAX_DISPLAYNAME_LEN = 256
MAX_AVATAR_URL_LEN = 1000
-class ProfileHandler(BaseHandler):
+class ProfileHandler:
"""Handles fetching and updating user profile information.
ProfileHandler can be instantiated directly on workers and will
@@ -54,7 +52,9 @@ class ProfileHandler(BaseHandler):
PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
+ self.hs = hs
self.federation = hs.get_federation_client()
hs.get_federation_registry().register_query_handler(
@@ -62,6 +62,7 @@ class ProfileHandler(BaseHandler):
)
self.user_directory_handler = hs.get_user_directory_handler()
+ self.request_ratelimiter = hs.get_request_ratelimiter()
if hs.config.worker.run_background_tasks:
self.clock.looping_call(
@@ -178,7 +179,7 @@ class ProfileHandler(BaseHandler):
if not by_admin and target_user != requester.user:
raise AuthError(400, "Cannot set another user's displayname")
- if not by_admin and not self.hs.config.enable_set_displayname:
+ if not by_admin and not self.hs.config.registration.enable_set_displayname:
profile = await self.store.get_profileinfo(target_user.localpart)
if profile.display_name:
raise SynapseError(
@@ -268,7 +269,7 @@ class ProfileHandler(BaseHandler):
if not by_admin and target_user != requester.user:
raise AuthError(400, "Cannot set another user's avatar_url")
- if not by_admin and not self.hs.config.enable_set_avatar_url:
+ if not by_admin and not self.hs.config.registration.enable_set_avatar_url:
profile = await self.store.get_profileinfo(target_user.localpart)
if profile.avatar_url:
raise SynapseError(
@@ -346,7 +347,7 @@ class ProfileHandler(BaseHandler):
if not self.hs.is_mine(target_user):
return
- await self.ratelimit(requester)
+ await self.request_ratelimiter.ratelimit(requester)
# Do not actually update the room state for shadow-banned users.
if requester.shadow_banned:
@@ -397,7 +398,7 @@ class ProfileHandler(BaseHandler):
# when building a membership event. In this case, we must allow the
# lookup.
if (
- not self.hs.config.limit_profile_requests_to_users_who_share_rooms
+ not self.hs.config.server.limit_profile_requests_to_users_who_share_rooms
or not requester
):
return
diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py
index bd8160e7ed..58593e570e 100644
--- a/synapse/handlers/read_marker.py
+++ b/synapse/handlers/read_marker.py
@@ -17,17 +17,14 @@ from typing import TYPE_CHECKING
from synapse.util.async_helpers import Linearizer
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-class ReadMarkerHandler(BaseHandler):
+class ReadMarkerHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
self.server_name = hs.config.server.server_name
self.store = hs.get_datastore()
self.account_data_handler = hs.get_account_data_handler()
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index f21f33ada2..374e961e3b 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -16,7 +16,6 @@ from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
from synapse.api.constants import ReadReceiptEventFields
from synapse.appservice import ApplicationService
-from synapse.handlers._base import BaseHandler
from synapse.streams import EventSource
from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id
@@ -26,10 +25,9 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class ReceiptsHandler(BaseHandler):
+class ReceiptsHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.notifier = hs.get_notifier()
self.server_name = hs.config.server.server_name
self.store = hs.get_datastore()
self.event_auth_handler = hs.get_event_auth_handler()
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 4f99f137a2..a0e6a01775 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -41,8 +41,6 @@ from synapse.spam_checker_api import RegistrationBehaviour
from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, UserID, create_requester
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -85,9 +83,10 @@ class LoginDict(TypedDict):
refresh_token: Optional[str]
-class RegistrationHandler(BaseHandler):
+class RegistrationHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
self.hs = hs
self.auth = hs.get_auth()
self._auth_handler = hs.get_auth_handler()
@@ -116,8 +115,8 @@ class RegistrationHandler(BaseHandler):
self._register_device_client = self.register_device_inner
self.pusher_pool = hs.get_pusherpool()
- self.session_lifetime = hs.config.session_lifetime
- self.access_token_lifetime = hs.config.access_token_lifetime
+ self.session_lifetime = hs.config.registration.session_lifetime
+ self.access_token_lifetime = hs.config.registration.access_token_lifetime
init_counters_for_auth_provider("")
@@ -340,8 +339,13 @@ class RegistrationHandler(BaseHandler):
auth_provider=(auth_provider_id or ""),
).inc()
+ # If the user does not need to consent at registration, auto-join any
+ # configured rooms.
if not self.hs.config.consent.user_consent_at_registration:
- if not self.hs.config.auto_join_rooms_for_guests and make_guest:
+ if (
+ not self.hs.config.registration.auto_join_rooms_for_guests
+ and make_guest
+ ):
logger.info(
"Skipping auto-join for %s because auto-join for guests is disabled",
user_id,
@@ -387,7 +391,7 @@ class RegistrationHandler(BaseHandler):
"preset": self.hs.config.registration.autocreate_auto_join_room_preset,
}
- # If the configuration providers a user ID to create rooms with, use
+ # If the configuration provides a user ID to create rooms with, use
# that instead of the first user registered.
requires_join = False
if self.hs.config.registration.auto_join_user_id:
@@ -510,7 +514,7 @@ class RegistrationHandler(BaseHandler):
# we don't have a local user in the room to craft up an invite with.
requires_invite = await self.store.is_host_joined(
room_id,
- self.server_name,
+ self._server_name,
)
if requires_invite:
@@ -854,7 +858,7 @@ class RegistrationHandler(BaseHandler):
# Necessary due to auth checks prior to the threepid being
# written to the db
if is_threepid_reserved(
- self.hs.config.mau_limits_reserved_threepids, threepid
+ self.hs.config.server.mau_limits_reserved_threepids, threepid
):
await self.store.upsert_monthly_active_user(user_id)
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 8fede5e935..7072bca1fc 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -52,6 +52,7 @@ from synapse.api.errors import (
)
from synapse.api.filtering import Filter
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
+from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_power_levels_contents
from synapse.rest.admin._base import assert_user_is_admin
@@ -75,8 +76,6 @@ from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import parse_and_validate_server_name
from synapse.visibility import filter_events_for_client
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -87,15 +86,18 @@ id_server_scheme = "https://"
FIVE_MINUTES_IN_MS = 5 * 60 * 1000
-class RoomCreationHandler(BaseHandler):
+class RoomCreationHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
+ self.clock = hs.get_clock()
+ self.hs = hs
self.spam_checker = hs.get_spam_checker()
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self._event_auth_handler = hs.get_event_auth_handler()
self.config = hs.config
+ self.request_ratelimiter = hs.get_request_ratelimiter()
# Room state based off defined presets
self._presets_dict: Dict[str, Dict[str, Any]] = {
@@ -161,7 +163,7 @@ class RoomCreationHandler(BaseHandler):
Raises:
ShadowBanError if the requester is shadow-banned.
"""
- await self.ratelimit(requester)
+ await self.request_ratelimiter.ratelimit(requester)
user_id = requester.user.to_string()
@@ -237,8 +239,9 @@ class RoomCreationHandler(BaseHandler):
},
},
)
- old_room_version = await self.store.get_room_version_id(old_room_id)
- await self._event_auth_handler.check_from_context(
+ old_room_version = await self.store.get_room_version(old_room_id)
+ validate_event_for_room_version(old_room_version, tombstone_event)
+ await self._event_auth_handler.check_auth_rules_from_context(
old_room_version, tombstone_event, tombstone_context
)
@@ -663,10 +666,10 @@ class RoomCreationHandler(BaseHandler):
raise SynapseError(403, "You are not permitted to create rooms")
if ratelimit:
- await self.ratelimit(requester)
+ await self.request_ratelimiter.ratelimit(requester)
room_version_id = config.get(
- "room_version", self.config.default_room_version.identifier
+ "room_version", self.config.server.default_room_version.identifier
)
if not isinstance(room_version_id, str):
@@ -858,6 +861,7 @@ class RoomCreationHandler(BaseHandler):
"invite",
ratelimit=False,
content=content,
+ new_room=True,
)
for invite_3pid in invite_3pid_list:
@@ -960,6 +964,7 @@ class RoomCreationHandler(BaseHandler):
"join",
ratelimit=ratelimit,
content=creator_join_profile,
+ new_room=True,
)
# We treat the power levels override specially as this needs to be one
diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py
new file mode 100644
index 0000000000..51dd4e7555
--- /dev/null
+++ b/synapse/handlers/room_batch.py
@@ -0,0 +1,423 @@
+import logging
+from typing import TYPE_CHECKING, List, Tuple
+
+from synapse.api.constants import EventContentFields, EventTypes
+from synapse.appservice import ApplicationService
+from synapse.http.servlet import assert_params_in_dict
+from synapse.types import JsonDict, Requester, UserID, create_requester
+from synapse.util.stringutils import random_string
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class RoomBatchHandler:
+ def __init__(self, hs: "HomeServer"):
+ self.hs = hs
+ self.store = hs.get_datastore()
+ self.state_store = hs.get_storage().state
+ self.event_creation_handler = hs.get_event_creation_handler()
+ self.room_member_handler = hs.get_room_member_handler()
+ self.auth = hs.get_auth()
+
+ async def inherit_depth_from_prev_ids(self, prev_event_ids: List[str]) -> int:
+ """Finds the depth which would sort it after the most-recent
+ prev_event_id but before the successors of those events. If no
+ successors are found, we assume it's an historical extremity part of the
+ current batch and use the same depth as the prev_event_ids.
+
+ Args:
+ prev_event_ids: List of prev event IDs
+
+ Returns:
+ Inherited depth
+ """
+ (
+ most_recent_prev_event_id,
+ most_recent_prev_event_depth,
+ ) = await self.store.get_max_depth_of(prev_event_ids)
+
+ # We want to insert the historical event after the `prev_event` but before the successor event
+ #
+ # We inherit depth from the successor event instead of the `prev_event`
+ # because events returned from `/messages` are first sorted by `topological_ordering`
+ # which is just the `depth` and then tie-break with `stream_ordering`.
+ #
+ # We mark these inserted historical events as "backfilled" which gives them a
+ # negative `stream_ordering`. If we use the same depth as the `prev_event`,
+ # then our historical event will tie-break and be sorted before the `prev_event`
+ # when it should come after.
+ #
+ # We want to use the successor event depth so they appear after `prev_event` because
+ # it has a larger `depth` but before the successor event because the `stream_ordering`
+ # is negative before the successor event.
+ successor_event_ids = await self.store.get_successor_events(
+ [most_recent_prev_event_id]
+ )
+
+ # If we can't find any successor events, then it's a forward extremity of
+ # historical messages and we can just inherit from the previous historical
+ # event which we can already assume has the correct depth where we want
+ # to insert into.
+ if not successor_event_ids:
+ depth = most_recent_prev_event_depth
+ else:
+ (
+ _,
+ oldest_successor_depth,
+ ) = await self.store.get_min_depth_of(successor_event_ids)
+
+ depth = oldest_successor_depth
+
+ return depth
+
+ def create_insertion_event_dict(
+ self, sender: str, room_id: str, origin_server_ts: int
+ ) -> JsonDict:
+ """Creates an event dict for an "insertion" event with the proper fields
+ and a random batch ID.
+
+ Args:
+ sender: The event author MXID
+ room_id: The room ID that the event belongs to
+ origin_server_ts: Timestamp when the event was sent
+
+ Returns:
+ The new event dictionary to insert.
+ """
+
+ next_batch_id = random_string(8)
+ insertion_event = {
+ "type": EventTypes.MSC2716_INSERTION,
+ "sender": sender,
+ "room_id": room_id,
+ "content": {
+ EventContentFields.MSC2716_NEXT_BATCH_ID: next_batch_id,
+ EventContentFields.MSC2716_HISTORICAL: True,
+ },
+ "origin_server_ts": origin_server_ts,
+ }
+
+ return insertion_event
+
+ async def create_requester_for_user_id_from_app_service(
+ self, user_id: str, app_service: ApplicationService
+ ) -> Requester:
+ """Creates a new requester for the given user_id
+ and validates that the app service is allowed to control
+ the given user.
+
+ Args:
+ user_id: The author MXID that the app service is controlling
+ app_service: The app service that controls the user
+
+ Returns:
+ Requester object
+ """
+
+ await self.auth.validate_appservice_can_control_user_id(app_service, user_id)
+
+ return create_requester(user_id, app_service=app_service)
+
+ async def get_most_recent_auth_event_ids_from_event_id_list(
+ self, event_ids: List[str]
+ ) -> List[str]:
+ """Find the most recent auth event ids (derived from state events) that
+ allowed that message to be sent. We will use this as a base
+ to auth our historical messages against.
+
+ Args:
+ event_ids: List of event ID's to look at
+
+ Returns:
+ List of event ID's
+ """
+
+ (
+ most_recent_prev_event_id,
+ _,
+ ) = await self.store.get_max_depth_of(event_ids)
+ # mapping from (type, state_key) -> state_event_id
+ prev_state_map = await self.state_store.get_state_ids_for_event(
+ most_recent_prev_event_id
+ )
+ # List of state event ID's
+ prev_state_ids = list(prev_state_map.values())
+ auth_event_ids = prev_state_ids
+
+ return auth_event_ids
+
+ async def persist_state_events_at_start(
+ self,
+ state_events_at_start: List[JsonDict],
+ room_id: str,
+ initial_auth_event_ids: List[str],
+ app_service_requester: Requester,
+ ) -> List[str]:
+ """Takes all `state_events_at_start` event dictionaries and creates/persists
+ them as floating state events which don't resolve into the current room state.
+ They are floating because they reference a fake prev_event which doesn't connect
+ to the normal DAG at all.
+
+ Args:
+ state_events_at_start:
+ room_id: Room where you want the events persisted in.
+ initial_auth_event_ids: These will be the auth_events for the first
+ state event created. Each event created afterwards will be
+ added to the list of auth events for the next state event
+ created.
+ app_service_requester: The requester of an application service.
+
+ Returns:
+ List of state event ID's we just persisted
+ """
+ assert app_service_requester.app_service
+
+ state_event_ids_at_start = []
+ auth_event_ids = initial_auth_event_ids.copy()
+ for state_event in state_events_at_start:
+ assert_params_in_dict(
+ state_event, ["type", "origin_server_ts", "content", "sender"]
+ )
+
+ logger.debug(
+ "RoomBatchSendEventRestServlet inserting state_event=%s, auth_event_ids=%s",
+ state_event,
+ auth_event_ids,
+ )
+
+ event_dict = {
+ "type": state_event["type"],
+ "origin_server_ts": state_event["origin_server_ts"],
+ "content": state_event["content"],
+ "room_id": room_id,
+ "sender": state_event["sender"],
+ "state_key": state_event["state_key"],
+ }
+
+ # Mark all events as historical
+ event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
+
+ # Make the state events float off on their own so we don't have a
+ # bunch of `@mxid joined the room` noise between each batch
+ fake_prev_event_id = "$" + random_string(43)
+
+ # TODO: This is pretty much the same as some other code to handle inserting state in this file
+ if event_dict["type"] == EventTypes.Member:
+ membership = event_dict["content"].get("membership", None)
+ event_id, _ = await self.room_member_handler.update_membership(
+ await self.create_requester_for_user_id_from_app_service(
+ state_event["sender"], app_service_requester.app_service
+ ),
+ target=UserID.from_string(event_dict["state_key"]),
+ room_id=room_id,
+ action=membership,
+ content=event_dict["content"],
+ outlier=True,
+ prev_event_ids=[fake_prev_event_id],
+ # Make sure to use a copy of this list because we modify it
+ # later in the loop here. Otherwise it will be the same
+ # reference and also update in the event when we append later.
+ auth_event_ids=auth_event_ids.copy(),
+ )
+ else:
+ # TODO: Add some complement tests that adds state that is not member joins
+ # and will use this code path. Maybe we only want to support join state events
+ # and can get rid of this `else`?
+ (
+ event,
+ _,
+ ) = await self.event_creation_handler.create_and_send_nonmember_event(
+ await self.create_requester_for_user_id_from_app_service(
+ state_event["sender"], app_service_requester.app_service
+ ),
+ event_dict,
+ outlier=True,
+ prev_event_ids=[fake_prev_event_id],
+ # Make sure to use a copy of this list because we modify it
+ # later in the loop here. Otherwise it will be the same
+ # reference and would also be updated in the event when we append later.
+ auth_event_ids=auth_event_ids.copy(),
+ )
+ event_id = event.event_id
+
+ state_event_ids_at_start.append(event_id)
+ auth_event_ids.append(event_id)
+
+ return state_event_ids_at_start
+
+ async def persist_historical_events(
+ self,
+ events_to_create: List[JsonDict],
+ room_id: str,
+ initial_prev_event_ids: List[str],
+ inherited_depth: int,
+ auth_event_ids: List[str],
+ app_service_requester: Requester,
+ ) -> List[str]:
+ """Create and persist all events provided sequentially. Handles the
+ complexity of creating events in chronological order so they can
+ reference each other by prev_event but still persist in
+ reverse-chronological order so they have the correct
+ (topological_ordering, stream_ordering) and sort correctly from
+ /messages.
+
+ Args:
+ events_to_create: List of historical events to create in JSON
+ dictionary format.
+ room_id: Room where you want the events persisted in.
+ initial_prev_event_ids: These will be the prev_events for the first
+ event created. Each event created afterwards will point to the
+ previous event created.
+ inherited_depth: The depth to create the events at (you will
+ probably obtain this by calling inherit_depth_from_prev_ids(...)).
+ auth_event_ids: Define which events allow you to create the given
+ event in the room.
+ app_service_requester: The requester of an application service.
+
+ Returns:
+ List of persisted event IDs
+ """
+ assert app_service_requester.app_service
+
+ prev_event_ids = initial_prev_event_ids.copy()
+
+ event_ids = []
+ events_to_persist = []
+ for ev in events_to_create:
+ assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"])
+
+ event_dict = {
+ "type": ev["type"],
+ "origin_server_ts": ev["origin_server_ts"],
+ "content": ev["content"],
+ "room_id": room_id,
+ "sender": ev["sender"], # requester.user.to_string(),
+ "prev_events": prev_event_ids.copy(),
+ }
+
+ # Mark all events as historical
+ event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
+
+ event, context = await self.event_creation_handler.create_event(
+ await self.create_requester_for_user_id_from_app_service(
+ ev["sender"], app_service_requester.app_service
+ ),
+ event_dict,
+ prev_event_ids=event_dict.get("prev_events"),
+ auth_event_ids=auth_event_ids,
+ historical=True,
+ depth=inherited_depth,
+ )
+ logger.debug(
+ "RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s, auth_event_ids=%s",
+ event,
+ prev_event_ids,
+ auth_event_ids,
+ )
+
+ assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % (
+ event.sender,
+ )
+
+ events_to_persist.append((event, context))
+ event_id = event.event_id
+
+ event_ids.append(event_id)
+ prev_event_ids = [event_id]
+
+ # Persist events in reverse-chronological order so they have the
+ # correct stream_ordering as they are backfilled (which decrements).
+ # Events are sorted by (topological_ordering, stream_ordering)
+ # where topological_ordering is just depth.
+ for (event, context) in reversed(events_to_persist):
+ await self.event_creation_handler.handle_new_client_event(
+ await self.create_requester_for_user_id_from_app_service(
+ event["sender"], app_service_requester.app_service
+ ),
+ event=event,
+ context=context,
+ )
+
+ return event_ids
+
+ async def handle_batch_of_events(
+ self,
+ events_to_create: List[JsonDict],
+ room_id: str,
+ batch_id_to_connect_to: str,
+ initial_prev_event_ids: List[str],
+ inherited_depth: int,
+ auth_event_ids: List[str],
+ app_service_requester: Requester,
+ ) -> Tuple[List[str], str]:
+ """
+ Handles creating and persisting all of the historical events as well
+ as insertion and batch meta events to make the batch navigable in the DAG.
+
+ Args:
+ events_to_create: List of historical events to create in JSON
+ dictionary format.
+ room_id: Room where you want the events created in.
+ batch_id_to_connect_to: The batch_id from the insertion event you
+ want this batch to connect to.
+ initial_prev_event_ids: These will be the prev_events for the first
+ event created. Each event created afterwards will point to the
+ previous event created.
+ inherited_depth: The depth to create the events at (you will
+ probably obtain this by calling inherit_depth_from_prev_ids(...)).
+ auth_event_ids: Define which events allow you to create the given
+ event in the room.
+ app_service_requester: The requester of an application service.
+
+ Returns:
+ Tuple containing a list of created events and the next_batch_id
+ """
+
+ # Connect this current batch to the insertion event from the previous batch
+ last_event_in_batch = events_to_create[-1]
+ batch_event = {
+ "type": EventTypes.MSC2716_BATCH,
+ "sender": app_service_requester.user.to_string(),
+ "room_id": room_id,
+ "content": {
+ EventContentFields.MSC2716_BATCH_ID: batch_id_to_connect_to,
+ EventContentFields.MSC2716_HISTORICAL: True,
+ },
+ # Since the batch event is put at the end of the batch,
+ # where the newest-in-time event is, copy the origin_server_ts from
+ # the last event we're inserting
+ "origin_server_ts": last_event_in_batch["origin_server_ts"],
+ }
+ # Add the batch event to the end of the batch (newest-in-time)
+ events_to_create.append(batch_event)
+
+ # Add an "insertion" event to the start of each batch (next to the oldest-in-time
+ # event in the batch) so the next batch can be connected to this one.
+ insertion_event = self.create_insertion_event_dict(
+ sender=app_service_requester.user.to_string(),
+ room_id=room_id,
+ # Since the insertion event is put at the start of the batch,
+ # where the oldest-in-time event is, copy the origin_server_ts from
+ # the first event we're inserting
+ origin_server_ts=events_to_create[0]["origin_server_ts"],
+ )
+ next_batch_id = insertion_event["content"][
+ EventContentFields.MSC2716_NEXT_BATCH_ID
+ ]
+ # Prepend the insertion event to the start of the batch (oldest-in-time)
+ events_to_create = [insertion_event] + events_to_create
+
+ # Create and persist all of the historical events
+ event_ids = await self.persist_historical_events(
+ events_to_create=events_to_create,
+ room_id=room_id,
+ initial_prev_event_ids=initial_prev_event_ids,
+ inherited_depth=inherited_depth,
+ auth_event_ids=auth_event_ids,
+ app_service_requester=app_service_requester,
+ )
+
+ return event_ids, next_batch_id
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index c3d4199ed1..ba7a14d651 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -36,8 +36,6 @@ from synapse.types import JsonDict, ThirdPartyInstanceID
from synapse.util.caches.descriptors import _CacheContext, cached
from synapse.util.caches.response_cache import ResponseCache
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -49,9 +47,10 @@ REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000
EMPTY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)
-class RoomListHandler(BaseHandler):
+class RoomListHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
+ self.hs = hs
self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
self.response_cache: ResponseCache[
Tuple[Optional[int], Optional[str], Optional[ThirdPartyInstanceID]]
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index dc1202f8d8..174a743f6a 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -51,8 +51,6 @@ from synapse.types import (
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_left_room
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -90,8 +88,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
self._server_notices_mxid = self.config.servernotices.server_notices_mxid
- self._enable_lookup = hs.config.enable_3pid_lookup
- self.allow_per_room_profiles = self.config.allow_per_room_profiles
+ self._enable_lookup = hs.config.registration.enable_3pid_lookup
+ self.allow_per_room_profiles = self.config.server.allow_per_room_profiles
self._join_rate_limiter_local = Ratelimiter(
store=self.store,
@@ -119,9 +117,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count,
)
- # This is only used to get at the ratelimit function. It's fine there are
- # multiple of these as it doesn't store state.
- self.base_handler = BaseHandler(hs)
+ self.request_ratelimiter = hs.get_request_ratelimiter()
@abc.abstractmethod
async def _remote_join(
@@ -435,6 +431,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
third_party_signed: Optional[dict] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
+ new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
prev_event_ids: Optional[List[str]] = None,
@@ -452,6 +449,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
third_party_signed: Information from a 3PID invite.
ratelimit: Whether to rate limit the request.
content: The content of the created event.
+ new_room: Whether the membership update is happening in the context of a room
+ creation.
require_consent: Whether consent is required.
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
@@ -505,6 +504,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
+ new_room=new_room,
require_consent=require_consent,
outlier=outlier,
prev_event_ids=prev_event_ids,
@@ -524,6 +524,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
third_party_signed: Optional[dict] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
+ new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
prev_event_ids: Optional[List[str]] = None,
@@ -543,6 +544,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
third_party_signed:
ratelimit:
content:
+ new_room: Whether the membership update is happening in the context of a room
+ creation.
require_consent:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
@@ -645,7 +648,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
is_requester_admin = await self.auth.is_server_admin(requester.user)
if not is_requester_admin:
- if self.config.block_non_admin_invites:
+ if self.config.server.block_non_admin_invites:
logger.info(
"Blocking invite: user is not admin and non-admin "
"invites disabled"
@@ -746,6 +749,30 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
+ # Figure out whether the user is a server admin to determine whether they
+ # should be able to bypass the spam checker.
+ if (
+ self._server_notices_mxid is not None
+ and requester.user.to_string() == self._server_notices_mxid
+ ):
+ # allow the server notices mxid to join rooms
+ bypass_spam_checker = True
+
+ else:
+ bypass_spam_checker = await self.auth.is_server_admin(requester.user)
+
+ inviter = await self._get_inviter(target.to_string(), room_id)
+ if (
+ not bypass_spam_checker
+ # We assume that if the spam checker allowed the user to create
+ # a room then they're allowed to join it.
+ and not new_room
+ and not await self.spam_checker.user_may_join_room(
+ target.to_string(), room_id, is_invited=inviter is not None
+ )
+ ):
+ raise SynapseError(403, "Not allowed to join this room")
+
# Check if a remote join should be performed.
remote_join, remote_room_hosts = await self._should_perform_remote_join(
target.to_string(), room_id, remote_room_hosts, content, is_host_in_room
@@ -1250,7 +1277,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
Raises:
ShadowBanError if the requester has been shadow-banned.
"""
- if self.config.block_non_admin_invites:
+ if self.config.server.block_non_admin_invites:
is_requester_admin = await self.auth.is_server_admin(requester.user)
if not is_requester_admin:
raise SynapseError(
@@ -1264,7 +1291,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# We need to rate limit *before* we send out any 3PID invites, so we
# can't just rely on the standard ratelimiting of events.
- await self.base_handler.ratelimit(requester)
+ await self.request_ratelimiter.ratelimit(requester)
can_invite = await self.third_party_event_rules.check_threepid_can_be_invited(
medium, address, room_id
@@ -1288,10 +1315,22 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if invitee:
# Note that update_membership with an action of "invite" can raise
# a ShadowBanError, but this was done above already.
+ # We don't check the invite against the spamchecker(s) here (through
+ # user_may_invite) because we'll do it further down the line anyway (in
+ # update_membership_locked).
_, stream_id = await self.update_membership(
requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id
)
else:
+ # Check if the spamchecker(s) allow this invite to go through.
+ if not await self.spam_checker.user_may_send_3pid_invite(
+ inviter_userid=requester.user.to_string(),
+ medium=medium,
+ address=address,
+ room_id=room_id,
+ ):
+ raise SynapseError(403, "Cannot send threepid invite")
+
stream_id = await self._make_and_store_3pid_invite(
requester,
id_server,
@@ -1448,7 +1487,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
Returns: bool of whether the complexity is too great, or None
if unable to be fetched
"""
- max_complexity = self.hs.config.limit_remote_rooms.complexity
+ max_complexity = self.hs.config.server.limit_remote_rooms.complexity
complexity = await self.federation_handler.get_room_complexity(
remote_room_hosts, room_id
)
@@ -1464,7 +1503,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
Args:
room_id: The room ID to check for complexity.
"""
- max_complexity = self.hs.config.limit_remote_rooms.complexity
+ max_complexity = self.hs.config.server.limit_remote_rooms.complexity
complexity = await self.store.get_room_complexity(room_id)
return complexity["v1"] > max_complexity
@@ -1488,8 +1527,11 @@ class RoomMemberMasterHandler(RoomMemberHandler):
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
- check_complexity = self.hs.config.limit_remote_rooms.enabled
- if check_complexity and self.hs.config.limit_remote_rooms.admins_can_join:
+ check_complexity = self.hs.config.server.limit_remote_rooms.enabled
+ if (
+ check_complexity
+ and self.hs.config.server.limit_remote_rooms.admins_can_join
+ ):
check_complexity = not await self.auth.is_server_admin(user)
if check_complexity:
@@ -1500,7 +1542,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
if too_complex is True:
raise SynapseError(
code=400,
- msg=self.hs.config.limit_remote_rooms.complexity_error,
+ msg=self.hs.config.server.limit_remote_rooms.complexity_error,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
@@ -1535,7 +1577,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
)
raise SynapseError(
code=400,
- msg=self.hs.config.limit_remote_rooms.complexity_error,
+ msg=self.hs.config.server.limit_remote_rooms.complexity_error,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py
index 2fed9f377a..727d75a50c 100644
--- a/synapse/handlers/saml.py
+++ b/synapse/handlers/saml.py
@@ -22,7 +22,6 @@ from saml2.client import Saml2Client
from synapse.api.errors import SynapseError
from synapse.config import ConfigError
-from synapse.handlers._base import BaseHandler
from synapse.handlers.sso import MappingException, UserAttributes
from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
@@ -51,9 +50,11 @@ class Saml2SessionData:
ui_auth_session_id: Optional[str] = None
-class SamlHandler(BaseHandler):
+class SamlHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
+ self.server_name = hs.hostname
self._saml_client = Saml2Client(hs.config.saml2.saml2_sp_config)
self._saml_idp_entityid = hs.config.saml2.saml2_idp_entityid
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 8226d6f5a1..a3ffa26be8 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -26,17 +26,18 @@ from synapse.storage.state import StateFilter
from synapse.types import JsonDict, UserID
from synapse.visibility import filter_events_for_client
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-class SearchHandler(BaseHandler):
+class SearchHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
+ self.state_handler = hs.get_state_handler()
+ self.clock = hs.get_clock()
+ self.hs = hs
self._event_serializer = hs.get_event_client_serializer()
self.storage = hs.get_storage()
self.state_store = self.storage.state
@@ -105,7 +106,7 @@ class SearchHandler(BaseHandler):
dict to be returned to the client with results of search
"""
- if not self.hs.config.enable_search:
+ if not self.hs.config.server.enable_search:
raise SynapseError(400, "Search is disabled on this homeserver")
batch_group = None
diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py
index 25e6b012b7..1a062a784c 100644
--- a/synapse/handlers/send_email.py
+++ b/synapse/handlers/send_email.py
@@ -105,8 +105,13 @@ async def _sendmail(
# set to enable TLS.
factory = build_sender_factory(hostname=smtphost if enable_tls else None)
- # the IReactorTCP interface claims host has to be a bytes, which seems to be wrong
- reactor.connectTCP(smtphost, smtpport, factory, timeout=30, bindAddress=None) # type: ignore[arg-type]
+ reactor.connectTCP(
+ smtphost, # type: ignore[arg-type]
+ smtpport,
+ factory,
+ timeout=30,
+ bindAddress=None,
+ )
await make_deferred_yieldable(d)
diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
index a63fac8283..706ad72761 100644
--- a/synapse/handlers/set_password.py
+++ b/synapse/handlers/set_password.py
@@ -17,19 +17,17 @@ from typing import TYPE_CHECKING, Optional
from synapse.api.errors import Codes, StoreError, SynapseError
from synapse.types import Requester
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-class SetPasswordHandler(BaseHandler):
+class SetPasswordHandler:
"""Handler which deals with changing user account passwords"""
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler()
diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py
index 8f5d465fa1..184730ebe8 100644
--- a/synapse/handlers/ui_auth/checkers.py
+++ b/synapse/handlers/ui_auth/checkers.py
@@ -153,21 +153,23 @@ class _BaseThreepidAuthChecker:
# msisdns are currently always ThreepidBehaviour.REMOTE
if medium == "msisdn":
- if not self.hs.config.account_threepid_delegate_msisdn:
+ if not self.hs.config.registration.account_threepid_delegate_msisdn:
raise SynapseError(
400, "Phone number verification is not enabled on this homeserver"
)
threepid = await identity_handler.threepid_from_creds(
- self.hs.config.account_threepid_delegate_msisdn, threepid_creds
+ self.hs.config.registration.account_threepid_delegate_msisdn,
+ threepid_creds,
)
elif medium == "email":
if (
self.hs.config.email.threepid_behaviour_email
== ThreepidBehaviour.REMOTE
):
- assert self.hs.config.account_threepid_delegate_email
+ assert self.hs.config.registration.account_threepid_delegate_email
threepid = await identity_handler.threepid_from_creds(
- self.hs.config.account_threepid_delegate_email, threepid_creds
+ self.hs.config.registration.account_threepid_delegate_email,
+ threepid_creds,
)
elif (
self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL
@@ -240,7 +242,7 @@ class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker):
_BaseThreepidAuthChecker.__init__(self, hs)
def is_enabled(self) -> bool:
- return bool(self.hs.config.account_threepid_delegate_msisdn)
+ return bool(self.hs.config.registration.account_threepid_delegate_msisdn)
async def check_auth(self, authdict: dict, clientip: str) -> Any:
return await self._check_threepid("msisdn", authdict)
@@ -252,7 +254,7 @@ class RegistrationTokenAuthChecker(UserInteractiveAuthChecker):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.hs = hs
- self._enabled = bool(hs.config.registration_requires_token)
+ self._enabled = bool(hs.config.registration.registration_requires_token)
self.store = hs.get_datastore()
def is_enabled(self) -> bool:
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index b91e7cb501..8810f048ba 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -60,7 +60,7 @@ class UserDirectoryHandler(StateDeltasHandler):
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
self.is_mine_id = hs.is_mine_id
- self.update_user_directory = hs.config.update_user_directory
+ self.update_user_directory = hs.config.server.update_user_directory
self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
self.spam_checker = hs.get_spam_checker()
# The current position in the current_state_delta stream
@@ -132,12 +132,7 @@ class UserDirectoryHandler(StateDeltasHandler):
# FIXME(#3714): We should probably do this in the same worker as all
# the other changes.
- # Support users are for diagnostics and should not appear in the user directory.
- is_support = await self.store.is_support_user(user_id)
- # When change profile information of deactivated user it should not appear in the user directory.
- is_deactivated = await self.store.get_user_deactivated_status(user_id)
-
- if not (is_support or is_deactivated):
+ if await self.store.should_include_local_user_in_dir(user_id):
await self.store.update_profile_in_user_dir(
user_id, profile.display_name, profile.avatar_url
)
@@ -208,6 +203,7 @@ class UserDirectoryHandler(StateDeltasHandler):
public_value=Membership.JOIN,
)
+ is_remote = not self.is_mine_id(state_key)
if change is MatchChange.now_false:
# Need to check if the server left the room entirely, if so
# we might need to remove all the users in that room
@@ -225,32 +221,36 @@ class UserDirectoryHandler(StateDeltasHandler):
for user_id in user_ids:
await self._handle_remove_user(room_id, user_id)
- return
+ continue
else:
logger.debug("Server is still in room: %r", room_id)
- is_support = await self.store.is_support_user(state_key)
- if not is_support:
+ include_in_dir = (
+ is_remote
+ or await self.store.should_include_local_user_in_dir(state_key)
+ )
+ if include_in_dir:
if change is MatchChange.no_change:
- # Handle any profile changes
- await self._handle_profile_change(
- state_key, room_id, prev_event_id, event_id
- )
+ # Handle any profile changes for remote users.
+ # (For local users we are not forced to scan membership
+ # events; instead the rest of the application calls
+ # `handle_local_profile_change`.)
+ if is_remote:
+ await self._handle_profile_change(
+ state_key, room_id, prev_event_id, event_id
+ )
continue
if change is MatchChange.now_true: # The user joined
- event = await self.store.get_event(event_id, allow_none=True)
- # It isn't expected for this event to not exist, but we
- # don't want the entire background process to break.
- if event is None:
- continue
-
- profile = ProfileInfo(
- avatar_url=event.content.get("avatar_url"),
- display_name=event.content.get("displayname"),
- )
-
- await self._handle_new_user(room_id, state_key, profile)
+ # This may be the first time we've seen a remote user. If
+ # so, ensure we have a directory entry for them. (We don't
+ # need to do this for local users: their directory entry
+ # is created at the point of registration.)
+ if is_remote:
+ await self._upsert_directory_entry_for_remote_user(
+ state_key, event_id
+ )
+ await self._track_user_joined_room(room_id, state_key)
else: # The user left
await self._handle_remove_user(room_id, state_key)
else:
@@ -300,7 +300,7 @@ class UserDirectoryHandler(StateDeltasHandler):
room_id
)
- logger.debug("Change: %r, publicness: %r", publicness, is_public)
+ logger.debug("Publicness change: %r, is_public: %r", publicness, is_public)
if publicness is MatchChange.now_true and not is_public:
# If we became world readable but room isn't currently public then
@@ -311,42 +311,50 @@ class UserDirectoryHandler(StateDeltasHandler):
# ignore the change
return
- other_users_in_room_with_profiles = (
- await self.store.get_users_in_room_with_profiles(room_id)
- )
+ users_in_room = await self.store.get_users_in_room(room_id)
# Remove every user from the sharing tables for that room.
- for user_id in other_users_in_room_with_profiles.keys():
+ for user_id in users_in_room:
await self.store.remove_user_who_share_room(user_id, room_id)
# Then, re-add them to the tables.
- # NOTE: this is not the most efficient method, as handle_new_user sets
+ # NOTE: this is not the most efficient method, as _track_user_joined_room sets
# up local_user -> other_user and other_user_whos_local -> local_user,
# which when ran over an entire room, will result in the same values
# being added multiple times. The batching upserts shouldn't make this
# too bad, though.
- for user_id, profile in other_users_in_room_with_profiles.items():
- await self._handle_new_user(room_id, user_id, profile)
+ for user_id in users_in_room:
+ await self._track_user_joined_room(room_id, user_id)
- async def _handle_new_user(
- self, room_id: str, user_id: str, profile: ProfileInfo
+ async def _upsert_directory_entry_for_remote_user(
+ self, user_id: str, event_id: str
) -> None:
- """Called when we might need to add user to directory
-
- Args:
- room_id: The room ID that user joined or started being public
- user_id
+ """A remote user has just joined a room. Ensure they have an entry in
+ the user directory. The caller is responsible for making sure they're
+ remote.
"""
+ event = await self.store.get_event(event_id, allow_none=True)
+ # It isn't expected for this event to not exist, but we
+ # don't want the entire background process to break.
+ if event is None:
+ return
+
logger.debug("Adding new user to dir, %r", user_id)
await self.store.update_profile_in_user_dir(
- user_id, profile.display_name, profile.avatar_url
+ user_id, event.content.get("displayname"), event.content.get("avatar_url")
)
+ async def _track_user_joined_room(self, room_id: str, user_id: str) -> None:
+ """Someone's just joined a room. Update `users_in_public_rooms` or
+ `users_who_share_private_rooms` as appropriate.
+
+ The caller is responsible for ensuring that the given user is not excluded
+ from the user directory.
+ """
is_public = await self.store.is_room_world_readable_or_publicly_joinable(
room_id
)
- # Now we update users who share rooms with users.
other_users_in_room = await self.store.get_users_in_room(room_id)
if is_public:
@@ -356,13 +364,7 @@ class UserDirectoryHandler(StateDeltasHandler):
# First, if they're our user then we need to update for every user
if self.is_mine_id(user_id):
-
- is_appservice = self.store.get_if_app_services_interested_in_user(
- user_id
- )
-
- # We don't care about appservice users.
- if not is_appservice:
+ if await self.store.should_include_local_user_in_dir(user_id):
for other_user_id in other_users_in_room:
if user_id == other_user_id:
continue
@@ -374,10 +376,10 @@ class UserDirectoryHandler(StateDeltasHandler):
if user_id == other_user_id:
continue
- is_appservice = self.store.get_if_app_services_interested_in_user(
+ include_other_user = self.is_mine_id(
other_user_id
- )
- if self.is_mine_id(other_user_id) and not is_appservice:
+ ) and await self.store.should_include_local_user_in_dir(other_user_id)
+ if include_other_user:
to_insert.add((other_user_id, user_id))
if to_insert:
|