diff --git a/synapse/__init__.py b/synapse/__init__.py
index 5da6c924fc..e4302d81a8 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -47,7 +47,7 @@ try:
except ImportError:
pass
-__version__ = "1.39.0"
+__version__ = "1.42.0rc2"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index a986fdb47a..5f0f34119b 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -62,7 +62,7 @@ class JoinRules:
INVITE = "invite"
PRIVATE = "private"
# As defined for MSC3083.
- MSC3083_RESTRICTED = "restricted"
+ RESTRICTED = "restricted"
class RestrictedJoinRuleTypes:
@@ -79,6 +79,7 @@ class LoginType:
TERMS = "m.login.terms"
SSO = "m.login.sso"
DUMMY = "m.login.dummy"
+ REGISTRATION_TOKEN = "org.matrix.msc3231.login.registration_token"
# This is used in the `type` parameter for /register when called by
@@ -197,6 +198,12 @@ class EventContentFields:
# cf https://github.com/matrix-org/matrix-doc/pull/1772
ROOM_TYPE = "type"
+ # The creator of the room, as used in `m.room.create` events.
+ ROOM_CREATOR = "creator"
+
+ # Used in m.room.guest_access events.
+ GUEST_ACCESS = "guest_access"
+
# Used on normal messages to indicate they were historically imported after the fact
MSC2716_HISTORICAL = "org.matrix.msc2716.historical"
# For "insertion" events to indicate what the next chunk ID should be in
@@ -231,5 +238,11 @@ class HistoryVisibility:
WORLD_READABLE = "world_readable"
+class GuestAccess:
+ CAN_JOIN = "can_join"
+ # anything that is not "can_join" is considered "forbidden", but for completeness:
+ FORBIDDEN = "forbidden"
+
+
class ReadReceiptEventFields:
MSC2285_HIDDEN = "org.matrix.msc2285.hidden"
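
The new `GuestAccess` constants centralise what were previously bare string literals. A minimal standalone sketch of the check they enable (the constants are inlined here for illustration, mirroring `EventContentFields.GUEST_ACCESS` and `GuestAccess`):

    # Standalone sketch; the real constants live in synapse.api.constants.
    GUEST_ACCESS = "guest_access"  # EventContentFields.GUEST_ACCESS
    CAN_JOIN = "can_join"          # GuestAccess.CAN_JOIN
    FORBIDDEN = "forbidden"        # GuestAccess.FORBIDDEN

    def guests_allowed(event_content: dict) -> bool:
        # Anything other than "can_join" is treated as forbidden.
        return event_content.get(GUEST_ACCESS, FORBIDDEN) == CAN_JOIN

    assert guests_allowed({"guest_access": "can_join"})
    assert not guests_allowed({})
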
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index dc662bca83..9480f448d7 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -147,6 +147,14 @@ class SynapseError(CodeMessageException):
return cs_error(self.msg, self.errcode)
+class InvalidAPICallError(SynapseError):
+ """You called an existing API endpoint, but fed that endpoint
+ invalid or incomplete data."""
+
+ def __init__(self, msg: str):
+ super().__init__(HTTPStatus.BAD_REQUEST, msg, Codes.BAD_JSON)
+
+
class ProxiedRequestError(SynapseError):
"""An error from a general matrix endpoint, eg. from a proxied Matrix API call.
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index bc678efe49..a19be6707a 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -70,12 +70,17 @@ class RoomVersion:
msc2176_redaction_rules = attr.ib(type=bool)
# MSC3083: Support the 'restricted' join_rule.
msc3083_join_rules = attr.ib(type=bool)
+ # MSC3375: Support for the proper redaction rules for MSC3083. This mustn't
+ # be enabled if MSC3083 is not.
+ msc3375_redaction_rules = attr.ib(type=bool)
# MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending
# m.room.membership event with membership 'knock'.
msc2403_knocking = attr.ib(type=bool)
# MSC2716: Adds m.room.power_levels -> content.historical field to control
# whether "insertion", "chunk", "marker" events can be sent
msc2716_historical = attr.ib(type=bool)
+ # MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events
+ msc2716_redactions = attr.ib(type=bool)
class RoomVersions:
@@ -90,8 +95,10 @@ class RoomVersions:
limit_notifications_power_levels=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
+ msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
+ msc2716_redactions=False,
)
V2 = RoomVersion(
"2",
@@ -104,8 +111,10 @@ class RoomVersions:
limit_notifications_power_levels=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
+ msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
+ msc2716_redactions=False,
)
V3 = RoomVersion(
"3",
@@ -118,8 +127,10 @@ class RoomVersions:
limit_notifications_power_levels=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
+ msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
+ msc2716_redactions=False,
)
V4 = RoomVersion(
"4",
@@ -132,8 +143,10 @@ class RoomVersions:
limit_notifications_power_levels=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
+ msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
+ msc2716_redactions=False,
)
V5 = RoomVersion(
"5",
@@ -146,8 +159,10 @@ class RoomVersions:
limit_notifications_power_levels=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
+ msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
+ msc2716_redactions=False,
)
V6 = RoomVersion(
"6",
@@ -160,8 +175,10 @@ class RoomVersions:
limit_notifications_power_levels=True,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
+ msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
+ msc2716_redactions=False,
)
MSC2176 = RoomVersion(
"org.matrix.msc2176",
@@ -174,12 +191,30 @@ class RoomVersions:
limit_notifications_power_levels=True,
msc2176_redaction_rules=True,
msc3083_join_rules=False,
+ msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
+ msc2716_redactions=False,
)
- MSC3083 = RoomVersion(
- "org.matrix.msc3083.v2",
- RoomDisposition.UNSTABLE,
+ V7 = RoomVersion(
+ "7",
+ RoomDisposition.STABLE,
+ EventFormatVersions.V3,
+ StateResolutionVersions.V2,
+ enforce_key_validity=True,
+ special_case_aliases_auth=False,
+ strict_canonicaljson=True,
+ limit_notifications_power_levels=True,
+ msc2176_redaction_rules=False,
+ msc3083_join_rules=False,
+ msc3375_redaction_rules=False,
+ msc2403_knocking=True,
+ msc2716_historical=False,
+ msc2716_redactions=False,
+ )
+ V8 = RoomVersion(
+ "8",
+ RoomDisposition.STABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
@@ -188,11 +223,13 @@ class RoomVersions:
limit_notifications_power_levels=True,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
- msc2403_knocking=False,
+ msc3375_redaction_rules=False,
+ msc2403_knocking=True,
msc2716_historical=False,
+ msc2716_redactions=False,
)
- V7 = RoomVersion(
- "7",
+ V9 = RoomVersion(
+ "9",
RoomDisposition.STABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
@@ -201,13 +238,31 @@ class RoomVersions:
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2176_redaction_rules=False,
- msc3083_join_rules=False,
+ msc3083_join_rules=True,
+ msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
+ msc2716_redactions=False,
)
MSC2716 = RoomVersion(
"org.matrix.msc2716",
- RoomDisposition.STABLE,
+ RoomDisposition.UNSTABLE,
+ EventFormatVersions.V3,
+ StateResolutionVersions.V2,
+ enforce_key_validity=True,
+ special_case_aliases_auth=False,
+ strict_canonicaljson=True,
+ limit_notifications_power_levels=True,
+ msc2176_redaction_rules=False,
+ msc3083_join_rules=False,
+ msc3375_redaction_rules=False,
+ msc2403_knocking=True,
+ msc2716_historical=True,
+ msc2716_redactions=False,
+ )
+ MSC2716v2 = RoomVersion(
+ "org.matrix.msc2716v2",
+ RoomDisposition.UNSTABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
@@ -216,8 +271,10 @@ class RoomVersions:
limit_notifications_power_levels=True,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
+ msc3375_redaction_rules=False,
msc2403_knocking=True,
msc2716_historical=True,
+ msc2716_redactions=True,
)
@@ -231,9 +288,10 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
RoomVersions.V5,
RoomVersions.V6,
RoomVersions.MSC2176,
- RoomVersions.MSC3083,
RoomVersions.V7,
RoomVersions.MSC2716,
+ RoomVersions.V8,
+ RoomVersions.V9,
)
}
@@ -266,7 +324,7 @@ MSC3244_CAPABILITIES = {
),
RoomVersionCapability(
"restricted",
- None,
+ RoomVersions.V8,
lambda room_version: room_version.msc3083_join_rules,
),
)
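
With these changes the stable room versions 8 and 9 replace the unstable `org.matrix.msc3083.v2` identifier, and each version's feature flags stay queryable in the usual way. A small sketch (assumes Synapse is importable):

    from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

    v9 = KNOWN_ROOM_VERSIONS["9"]
    assert v9.msc3083_join_rules        # restricted joins, stable since v8
    assert v9.msc3375_redaction_rules   # v9 adds the matching redaction rules
    assert "org.matrix.msc3083.v2" not in KNOWN_ROOM_VERSIONS
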
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 50a02f51f5..89bda00090 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import atexit
import gc
import logging
import os
@@ -36,7 +37,9 @@ from synapse.api.constants import MAX_PDU_SIZE
from synapse.app import check_bind_error
from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config.homeserver import HomeServerConfig
+from synapse.config.server import ManholeConfig
from synapse.crypto import context_factory
+from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.logging.context import PreserveLoggingContext
@@ -228,7 +231,12 @@ def listen_metrics(bind_addresses, port):
start_http_server(port, addr=host, registry=RegistryProxy)
-def listen_manhole(bind_addresses: Iterable[str], port: int, manhole_globals: dict):
+def listen_manhole(
+ bind_addresses: Iterable[str],
+ port: int,
+ manhole_settings: ManholeConfig,
+ manhole_globals: dict,
+):
# twisted.conch.manhole 21.1.0 uses "int_from_bytes", which produces a confusing
    # warning. It's fixed by https://github.com/twisted/twisted/pull/1522, so
# suppress the warning for now.
@@ -243,7 +251,7 @@ def listen_manhole(bind_addresses: Iterable[str], port: int, manhole_globals: di
listen_tcp(
bind_addresses,
port,
- manhole(username="matrix", password="rabbithole", globals=manhole_globals),
+ manhole(settings=manhole_settings, globals=manhole_globals),
)
@@ -370,6 +378,7 @@ async def start(hs: "HomeServer"):
load_legacy_spam_checkers(hs)
load_legacy_third_party_event_rules(hs)
+ load_legacy_presence_router(hs)
# If we've configured an expiry time for caches, start the background job now.
setup_expire_lru_cache_entries(hs)
@@ -401,6 +410,12 @@ async def start(hs: "HomeServer"):
gc.collect()
gc.freeze()
+ # Speed up shutdowns by freezing all allocated objects. This moves everything
+ # into the permanent generation and excludes them from the final GC.
+    # Unfortunately this only works on CPython 3.7 and later.
+ if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7):
+ atexit.register(gc.freeze)
+
def setup_sentry(hs):
"""Enable sentry integration, if enabled in configuration
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 3234d9ebba..7396db93c6 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -38,7 +38,6 @@ from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.room import RoomStore
from synapse.server import HomeServer
from synapse.util.logcontext import LoggingContext
from synapse.util.versionstring import get_version_string
@@ -58,7 +57,6 @@ class AdminCmdSlavedStore(
SlavedPushRuleStore,
SlavedEventStore,
SlavedClientIpStore,
- RoomStore,
BaseSlavedStore,
):
pass
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 3b7131af8f..2eb8d5a79c 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -64,42 +64,44 @@ from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.room import RoomStore
from synapse.rest.admin import register_servlets_for_media_repo
-from synapse.rest.client.v1 import events, login, presence, room
-from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
-from synapse.rest.client.v1.profile import (
- ProfileAvatarURLRestServlet,
- ProfileDisplaynameRestServlet,
- ProfileRestServlet,
-)
-from synapse.rest.client.v1.push_rule import PushRuleRestServlet
-from synapse.rest.client.v1.voip import VoipRestServlet
-from synapse.rest.client.v2_alpha import (
+from synapse.rest.client import (
account_data,
+ events,
groups,
+ login,
+ presence,
read_marker,
receipts,
+ room,
room_keys,
sync,
tags,
user_directory,
)
-from synapse.rest.client.v2_alpha._base import client_patterns
-from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
-from synapse.rest.client.v2_alpha.account_data import (
- AccountDataServlet,
- RoomAccountDataServlet,
-)
-from synapse.rest.client.v2_alpha.devices import DevicesRestServlet
-from synapse.rest.client.v2_alpha.keys import (
+from synapse.rest.client._base import client_patterns
+from synapse.rest.client.account import ThreepidRestServlet
+from synapse.rest.client.account_data import AccountDataServlet, RoomAccountDataServlet
+from synapse.rest.client.devices import DevicesRestServlet
+from synapse.rest.client.initial_sync import InitialSyncRestServlet
+from synapse.rest.client.keys import (
KeyChangesServlet,
KeyQueryServlet,
OneTimeKeyServlet,
)
-from synapse.rest.client.v2_alpha.register import RegisterRestServlet
-from synapse.rest.client.v2_alpha.sendtodevice import SendToDeviceRestServlet
+from synapse.rest.client.profile import (
+ ProfileAvatarURLRestServlet,
+ ProfileDisplaynameRestServlet,
+ ProfileRestServlet,
+)
+from synapse.rest.client.push_rule import PushRuleRestServlet
+from synapse.rest.client.register import (
+ RegisterRestServlet,
+ RegistrationTokenValidityRestServlet,
+)
+from synapse.rest.client.sendtodevice import SendToDeviceRestServlet
from synapse.rest.client.versions import VersionsRestServlet
+from synapse.rest.client.voip import VoipRestServlet
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
@@ -114,7 +116,9 @@ from synapse.storage.databases.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.storage.databases.main.presence import PresenceStore
+from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.search import SearchStore
+from synapse.storage.databases.main.session import SessionStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
@@ -237,7 +241,7 @@ class GenericWorkerSlavedStore(
ClientIpWorkerStore,
SlavedEventStore,
SlavedKeyStore,
- RoomStore,
+ RoomWorkerStore,
DirectoryStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
@@ -250,6 +254,7 @@ class GenericWorkerSlavedStore(
SearchStore,
TransactionWorkerStore,
LockStore,
+ SessionStore,
BaseSlavedStore,
):
pass
@@ -279,6 +284,7 @@ class GenericWorkerServer(HomeServer):
resource = JsonResource(self, canonical_json=False)
RegisterRestServlet(self).register(resource)
+ RegistrationTokenValidityRestServlet(self).register(resource)
login.register_servlets(self, resource)
ThreepidRestServlet(self).register(resource)
DevicesRestServlet(self).register(resource)
@@ -389,7 +395,10 @@ class GenericWorkerServer(HomeServer):
self._listen_http(listener)
elif listener.type == "manhole":
_base.listen_manhole(
- listener.bind_addresses, listener.port, manhole_globals={"hs": self}
+ listener.bind_addresses,
+ listener.port,
+ manhole_settings=self.config.server.manhole_settings,
+ manhole_globals={"hs": self},
)
elif listener.type == "metrics":
if not self.config.enable_metrics:
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 7dae163c1a..708db86f5d 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -291,7 +291,10 @@ class SynapseHomeServer(HomeServer):
)
elif listener.type == "manhole":
_base.listen_manhole(
- listener.bind_addresses, listener.port, manhole_globals={"hs": self}
+ listener.bind_addresses,
+ listener.port,
+ manhole_settings=self.config.server.manhole_settings,
+ manhole_globals={"hs": self},
)
elif listener.type == "replication":
services = listen_tcp(
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index d6ec618f8f..2cc242782a 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -237,13 +237,14 @@ class Config:
def read_templates(
self,
filenames: List[str],
- custom_template_directory: Optional[str] = None,
+ custom_template_directories: Optional[Iterable[str]] = None,
) -> List[jinja2.Template]:
"""Load a list of template files from disk using the given variables.
This function will attempt to load the given templates from the default Synapse
- template directory. If `custom_template_directory` is supplied, that directory
- is tried first.
+ template directory. If `custom_template_directories` is supplied, any directory
+ in this list is tried (in the order they appear in the list) before trying
+ Synapse's default directory.
Files read are treated as Jinja templates. The templates are not rendered yet
and have autoescape enabled.
@@ -251,8 +252,8 @@ class Config:
Args:
filenames: A list of template filenames to read.
- custom_template_directory: A directory to try to look for the templates
- before using the default Synapse template directory instead.
+            custom_template_directories: A list of directories to search for the
+                templates before falling back to the default Synapse template directory.
Raises:
ConfigError: if the file's path is incorrect or otherwise cannot be read.
@@ -260,20 +261,26 @@ class Config:
Returns:
A list of jinja2 templates.
"""
- search_directories = [self.default_template_dir]
-
- # The loader will first look in the custom template directory (if specified) for the
- # given filename. If it doesn't find it, it will use the default template dir instead
- if custom_template_directory:
- # Check that the given template directory exists
- if not self.path_exists(custom_template_directory):
- raise ConfigError(
- "Configured template directory does not exist: %s"
- % (custom_template_directory,)
- )
+ search_directories = []
+
+ # The loader will first look in the custom template directories (if specified)
+ # for the given filename. If it doesn't find it, it will use the default
+ # template dir instead.
+ if custom_template_directories is not None:
+ for custom_template_directory in custom_template_directories:
+ # Check that the given template directory exists
+ if not self.path_exists(custom_template_directory):
+ raise ConfigError(
+ "Configured template directory does not exist: %s"
+ % (custom_template_directory,)
+ )
+
+ # Search the custom template directory as well
+ search_directories.append(custom_template_directory)
- # Search the custom template directory as well
- search_directories.insert(0, custom_template_directory)
+        # Append the default directory at the end of the list so Jinja can fall
+        # back on it if a template is missing from any custom directory.
+ search_directories.append(self.default_template_dir)
# TODO: switch to synapse.util.templates.build_jinja_env
loader = jinja2.FileSystemLoader(search_directories)
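
The resulting search order is: each custom directory in the order given, then Synapse's default template directory. A standalone Jinja sketch of the same fallback behaviour (paths are hypothetical):

    import jinja2

    custom_dirs = ["/etc/synapse/templates"]          # hypothetical
    default_dir = "/usr/share/synapse/res/templates"  # hypothetical

    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader([*custom_dirs, default_dir]),
        autoescape=True,
    )
    # get_template() returns the first match, so a file in a custom directory
    # shadows the packaged default of the same name:
    # template = env.get_template("notice_expiry.html")
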
diff --git a/synapse/config/account_validity.py b/synapse/config/account_validity.py
index 6be4eafe55..ffaffc4931 100644
--- a/synapse/config/account_validity.py
+++ b/synapse/config/account_validity.py
@@ -11,8 +11,20 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import logging
+
from synapse.config._base import Config, ConfigError
+logger = logging.getLogger(__name__)
+
+LEGACY_TEMPLATE_DIR_WARNING = """
+This server's configuration file is using the deprecated 'template_dir' setting in the
+'account_validity' section. Support for this setting has been deprecated and will be
+removed in a future version of Synapse. Server admins should instead use the new
+'custom_template_directory' setting documented here:
+https://matrix-org.github.io/synapse/latest/templates.html
+---------------------------------------------------------------------------------------"""
+
class AccountValidityConfig(Config):
section = "account_validity"
@@ -69,6 +81,8 @@ class AccountValidityConfig(Config):
# Load account validity templates.
account_validity_template_dir = account_validity_config.get("template_dir")
+ if account_validity_template_dir is not None:
+ logger.warning(LEGACY_TEMPLATE_DIR_WARNING)
account_renewed_template_filename = account_validity_config.get(
"account_renewed_html_path", "account_renewed.html"
@@ -78,6 +92,11 @@ class AccountValidityConfig(Config):
)
# Read and store template content
+ custom_template_directories = (
+ self.root.server.custom_template_directory,
+ account_validity_template_dir,
+ )
+
(
self.account_validity_account_renewed_template,
self.account_validity_account_previously_renewed_template,
@@ -88,5 +107,5 @@ class AccountValidityConfig(Config):
"account_previously_renewed.html",
invalid_token_template_filename,
],
- account_validity_template_dir,
+ (td for td in custom_template_directories if td),
)
diff --git a/synapse/config/cache.py b/synapse/config/cache.py
index 8d5f38b5d9..d119427ad8 100644
--- a/synapse/config/cache.py
+++ b/synapse/config/cache.py
@@ -151,6 +151,15 @@ class CacheConfig(Config):
# entries are never evicted based on time.
#
#expiry_time: 30m
+
+ # Controls how long the results of a /sync request are cached for after
+ # a successful response is returned. A higher duration can help clients with
+ # intermittent connections, at the cost of higher memory usage.
+ #
+ # By default, this is zero, which means that sync responses are not cached
+ # at all.
+ #
+ #sync_response_cache_duration: 2m
"""
def read_config(self, config, **kwargs):
@@ -212,6 +221,10 @@ class CacheConfig(Config):
else:
self.expiry_time_msec = None
+ self.sync_response_cache_duration = self.parse_duration(
+ cache_config.get("sync_response_cache_duration", 0)
+ )
+
# Resize all caches (if necessary) with the new factors we've loaded
self.resize_all_caches()
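
The new option is passed through `Config.parse_duration`, so it accepts either an integer number of milliseconds or a suffixed string such as "2m". A simplified re-implementation, for illustration only:

    # Simplified sketch of the duration parsing; the real logic lives in
    # synapse.config._base.Config.parse_duration and supports more suffixes.
    UNITS = {"s": 1_000, "m": 60_000, "h": 3_600_000, "d": 86_400_000}

    def parse_duration(value) -> int:
        if isinstance(value, int):
            return value  # already milliseconds
        return int(value[:-1]) * UNITS[value[-1]]

    assert parse_duration("2m") == 120_000
    assert parse_duration(0) == 0  # the default: /sync responses are not cached
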
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 8d8f166e9b..936abe6178 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -16,6 +16,7 @@
# This file can't be called email.py because if it is, we cannot:
import email.utils
+import logging
import os
from enum import Enum
from typing import Optional
@@ -24,6 +25,8 @@ import attr
from ._base import Config, ConfigError
+logger = logging.getLogger(__name__)
+
MISSING_PASSWORD_RESET_CONFIG_ERROR = """\
Password reset emails are enabled on this homeserver due to a partial
'email' block. However, the following required keys are missing:
@@ -44,6 +47,14 @@ DEFAULT_SUBJECTS = {
"email_validation": "[%(server_name)s] Validate your email",
}
+LEGACY_TEMPLATE_DIR_WARNING = """
+This server's configuration file is using the deprecated 'template_dir' setting in the
+'email' section. Support for this setting has been deprecated and will be removed in a
+future version of Synapse. Server admins should instead use the new
+'custom_template_directory' setting documented here:
+https://matrix-org.github.io/synapse/latest/templates.html
+---------------------------------------------------------------------------------------"""
+
@attr.s(slots=True, frozen=True)
class EmailSubjectConfig:
@@ -80,6 +91,12 @@ class EmailConfig(Config):
self.require_transport_security = email_config.get(
"require_transport_security", False
)
+ self.enable_smtp_tls = email_config.get("enable_tls", True)
+ if self.require_transport_security and not self.enable_smtp_tls:
+ raise ConfigError(
+ "email.require_transport_security requires email.enable_tls to be true"
+ )
+
if "app_name" in email_config:
self.email_app_name = email_config["app_name"]
else:
@@ -99,6 +116,9 @@ class EmailConfig(Config):
# A user-configurable template directory
template_dir = email_config.get("template_dir")
+ if template_dir is not None:
+ logger.warning(LEGACY_TEMPLATE_DIR_WARNING)
+
if isinstance(template_dir, str):
# We need an absolute path, because we change directory after starting (and
# we don't yet know what auxiliary templates like mail.css we will need).
@@ -251,7 +271,14 @@ class EmailConfig(Config):
registration_template_success_html,
add_threepid_template_success_html,
],
- template_dir,
+ (
+ td
+ for td in (
+ self.root.server.custom_template_directory,
+ template_dir,
+ )
+ if td
+            ),  # Filter out any directories that are unset
)
# Render templates that do not contain any placeholders
@@ -291,7 +318,14 @@ class EmailConfig(Config):
self.email_notif_template_text,
) = self.read_templates(
[notif_template_html, notif_template_text],
- template_dir,
+ (
+ td
+ for td in (
+ self.root.server.custom_template_directory,
+ template_dir,
+ )
+ if td
+            ),  # Filter out any directories that are unset
)
self.email_notif_for_new_users = email_config.get(
@@ -314,7 +348,14 @@ class EmailConfig(Config):
self.account_validity_template_text,
) = self.read_templates(
[expiry_template_html, expiry_template_text],
- template_dir,
+ (
+ td
+ for td in (
+ self.root.server.custom_template_directory,
+ template_dir,
+ )
+ if td
+            ),  # Filter out any directories that are unset
)
subjects_config = email_config.get("subjects", {})
@@ -346,6 +387,9 @@ class EmailConfig(Config):
"""\
# Configuration for sending emails from Synapse.
#
+ # Server admins can configure custom templates for email content. See
+ # https://matrix-org.github.io/synapse/latest/templates.html for more information.
+ #
email:
# The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
#
@@ -368,6 +412,14 @@ class EmailConfig(Config):
#
#require_transport_security: true
+ # Uncomment the following to disable TLS for SMTP.
+ #
+ # By default, if the server supports TLS, it will be used, and the server
+ # must present a certificate that is valid for 'smtp_host'. If this option
+ # is set to false, TLS will not be used.
+ #
+ #enable_tls: false
+
# notif_from defines the "From" address to use when sending emails.
# It must be set if email sending is enabled.
#
@@ -414,49 +466,6 @@ class EmailConfig(Config):
#
#invite_client_location: https://app.element.io
- # Directory in which Synapse will try to find the template files below.
- # If not set, or the files named below are not found within the template
- # directory, default templates from within the Synapse package will be used.
- #
- # Synapse will look for the following templates in this directory:
- #
- # * The contents of email notifications of missed events: 'notif_mail.html' and
- # 'notif_mail.txt'.
- #
- # * The contents of account expiry notice emails: 'notice_expiry.html' and
- # 'notice_expiry.txt'.
- #
- # * The contents of password reset emails sent by the homeserver:
- # 'password_reset.html' and 'password_reset.txt'
- #
- # * An HTML page that a user will see when they follow the link in the password
- # reset email. The user will be asked to confirm the action before their
- # password is reset: 'password_reset_confirmation.html'
- #
- # * HTML pages for success and failure that a user will see when they confirm
- # the password reset flow using the page above: 'password_reset_success.html'
- # and 'password_reset_failure.html'
- #
- # * The contents of address verification emails sent during registration:
- # 'registration.html' and 'registration.txt'
- #
- # * HTML pages for success and failure that a user will see when they follow
- # the link in an address verification email sent during registration:
- # 'registration_success.html' and 'registration_failure.html'
- #
- # * The contents of address verification emails sent when an address is added
- # to a Matrix account: 'add_threepid.html' and 'add_threepid.txt'
- #
- # * HTML pages for success and failure that a user will see when they follow
- # the link in an address verification email sent when an address is added
- # to a Matrix account: 'add_threepid_success.html' and
- # 'add_threepid_failure.html'
- #
- # You can see the default templates at:
- # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
- #
- #template_dir: "res/templates"
-
# Subjects to use when sending emails from Synapse.
#
# The placeholder '%%(app)s' will be replaced with the value of the 'app_name'
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 4c60ee8c28..95deda11a5 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -37,4 +37,10 @@ class ExperimentalConfig(Config):
self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False)
# MSC3244 (room version capabilities)
- self.msc3244_enabled: bool = experimental.get("msc3244_enabled", False)
+ self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True)
+
+ # MSC3283 (set displayname, avatar_url and change 3pid capabilities)
+ self.msc3283_enabled: bool = experimental.get("msc3283_enabled", False)
+
+ # MSC3266 (room summary api)
+ self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False)
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 1f42a51857..442f1b9ac0 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -30,6 +30,7 @@ from .key import KeyConfig
from .logger import LoggingConfig
from .metrics import MetricsConfig
from .modules import ModulesConfig
+from .oembed import OembedConfig
from .oidc import OIDCConfig
from .password_auth_providers import PasswordAuthProviderConfig
from .push import PushConfig
@@ -65,6 +66,7 @@ class HomeServerConfig(RootConfig):
LoggingConfig,
RatelimitConfig,
ContentRepositoryConfig,
+ OembedConfig,
CaptchaConfig,
VoipConfig,
RegistrationConfig,
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index dcd3ed1dac..4a398a7932 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -67,20 +67,30 @@ handlers:
backupCount: 3 # Does not include the current log file.
encoding: utf8
- # Default to buffering writes to log file for efficiency. This means that
- # will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
- # logs will still be flushed immediately.
+ # Default to buffering writes to log file for efficiency.
+ # WARNING/ERROR logs will still be flushed immediately, but there will be a
+ # delay (of up to `period` seconds, or until the buffer is full with
+ # `capacity` messages) before INFO/DEBUG logs get written.
buffer:
class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler
target: file
- # The capacity is the number of log lines that are buffered before
- # being written to disk. Increasing this will lead to better
+
+ # The capacity is the maximum number of log lines that are buffered
+ # before being written to disk. Increasing this will lead to better
    # performance, at the expense of it taking longer for log lines to
# be written to disk.
+ # This parameter is required.
capacity: 10
- flushLevel: 30 # Flush for WARNING logs as well
+
+ # Logs with a level at or above the flush level will cause the buffer to
+ # be flushed immediately.
+ # Default value: 40 (ERROR)
+ # Other values: 50 (CRITICAL), 30 (WARNING), 20 (INFO), 10 (DEBUG)
+ flushLevel: 30 # Flush immediately for WARNING logs and higher
+
# The period of time, in seconds, between forced flushes.
# Messages will not be delayed for longer than this time.
+ # Default value: 5 seconds
period: 5
# A handler that writes logs to stderr. Unused by default, but can be used
diff --git a/synapse/config/oembed.py b/synapse/config/oembed.py
new file mode 100644
index 0000000000..09267b5eef
--- /dev/null
+++ b/synapse/config/oembed.py
@@ -0,0 +1,180 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+import re
+from typing import Any, Dict, Iterable, List, Pattern
+from urllib import parse as urlparse
+
+import attr
+import pkg_resources
+
+from synapse.types import JsonDict
+
+from ._base import Config, ConfigError
+from ._util import validate_config
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class OEmbedEndpointConfig:
+ # The API endpoint to fetch.
+ api_endpoint: str
+ # The patterns to match.
+ url_patterns: List[Pattern]
+
+
+class OembedConfig(Config):
+ """oEmbed Configuration"""
+
+ section = "oembed"
+
+ def read_config(self, config, **kwargs):
+ oembed_config: Dict[str, Any] = config.get("oembed") or {}
+
+        # A list of oEmbed endpoints, each with the URL patterns it matches.
+ self.oembed_patterns: List[OEmbedEndpointConfig] = list(
+ self._parse_and_validate_providers(oembed_config)
+ )
+
+ def _parse_and_validate_providers(
+ self, oembed_config: dict
+ ) -> Iterable[OEmbedEndpointConfig]:
+        """Extract and parse the oEmbed providers from the given config.
+
+        Returns a generator which yields OEmbedEndpointConfig objects.
+ """
+ # Whether to use the packaged providers.json file.
+        if not oembed_config.get("disable_default_providers", False):
+ providers = json.load(
+ pkg_resources.resource_stream("synapse", "res/providers.json")
+ )
+ yield from self._parse_and_validate_provider(
+ providers, config_path=("oembed",)
+ )
+
+        # The JSON files which include additional provider information.
+ for i, file in enumerate(oembed_config.get("additional_providers") or []):
+ # TODO Error checking.
+ with open(file) as f:
+ providers = json.load(f)
+
+ yield from self._parse_and_validate_provider(
+ providers,
+ config_path=(
+ "oembed",
+ "additional_providers",
+ f"<item {i}>",
+ ),
+ )
+
+ def _parse_and_validate_provider(
+ self, providers: List[JsonDict], config_path: Iterable[str]
+ ) -> Iterable[OEmbedEndpointConfig]:
+ # Ensure it is the proper form.
+ validate_config(
+ _OEMBED_PROVIDER_SCHEMA,
+ providers,
+ config_path=config_path,
+ )
+
+ # Parse it and yield each result.
+ for provider in providers:
+            # Each provider might have multiple API endpoints, each of which
+ # might have multiple patterns to match.
+ for endpoint in provider["endpoints"]:
+ api_endpoint = endpoint["url"]
+ patterns = [
+ self._glob_to_pattern(glob, config_path)
+ for glob in endpoint["schemes"]
+ ]
+ yield OEmbedEndpointConfig(api_endpoint, patterns)
+
+ def _glob_to_pattern(self, glob: str, config_path: Iterable[str]) -> Pattern:
+ """
+ Convert the glob into a sane regular expression to match against. The
+ rules followed will be slightly different for the domain portion vs.
+ the rest.
+
+ 1. The scheme must be one of HTTP / HTTPS (and have no globs).
+ 2. The domain can have globs, but we limit it to characters that can
+ reasonably be a domain part.
+ TODO: This does not attempt to handle Unicode domain names.
+ TODO: The domain should not allow wildcard TLDs.
+ 3. Other parts allow a glob to be any one, or more, characters.
+ """
+ results = urlparse.urlparse(glob)
+
+ # Ensure the scheme does not have wildcards (and is a sane scheme).
+ if results.scheme not in {"http", "https"}:
+ raise ConfigError(f"Insecure oEmbed scheme: {results.scheme}", config_path)
+
+ pattern = urlparse.urlunparse(
+ [
+ results.scheme,
+ re.escape(results.netloc).replace("\\*", "[a-zA-Z0-9_-]+"),
+ ]
+ + [re.escape(part).replace("\\*", ".+") for part in results[2:]]
+ )
+ return re.compile(pattern)
+
+ def generate_config_section(self, **kwargs):
+ return """\
+        # oEmbed allows for easier embedding of content from a website. It can
+        # be used for generating URL previews of services which support it.
+ #
+ oembed:
+ # A default list of oEmbed providers is included with Synapse.
+ #
+ # Uncomment the following to disable using these default oEmbed URLs.
+ # Defaults to 'false'.
+ #
+ #disable_default_providers: true
+
+ # Additional files with oEmbed configuration (each should be in the
+ # form of providers.json).
+ #
+ # By default, this list is empty (so only the default providers.json
+ # is used).
+ #
+ #additional_providers:
+ # - oembed/my_providers.json
+ """
+
+
+_OEMBED_PROVIDER_SCHEMA = {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "provider_name": {"type": "string"},
+ "provider_url": {"type": "string"},
+ "endpoints": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "schemes": {
+ "type": "array",
+ "items": {"type": "string"},
+ },
+ "url": {"type": "string"},
+ "formats": {"type": "array", "items": {"type": "string"}},
+ "discovery": {"type": "boolean"},
+ },
+ "required": ["schemes", "url"],
+ },
+ },
+ },
+ "required": ["provider_name", "provider_url", "endpoints"],
+ },
+}
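
To make the glob translation concrete, here is a standalone version of the same algorithm as `_glob_to_pattern`: a `*` in the domain becomes a conservative `[a-zA-Z0-9_-]+`, while a `*` anywhere else becomes `.+` (example URLs are hypothetical):

    import re
    from urllib import parse as urlparse

    def glob_to_pattern(glob: str):
        parts = urlparse.urlparse(glob)
        assert parts.scheme in {"http", "https"}
        return re.compile(
            urlparse.urlunparse(
                [
                    parts.scheme,
                    re.escape(parts.netloc).replace("\\*", "[a-zA-Z0-9_-]+"),
                ]
                + [re.escape(p).replace("\\*", ".+") for p in parts[2:]]
            )
        )

    p = glob_to_pattern("https://*.example.com/*/status/*")
    assert p.match("https://www.example.com/alice/status/123")
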
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 7a8d5851c4..f856327bd8 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -79,6 +79,11 @@ class RatelimitConfig(Config):
self.rc_registration = RateLimitConfig(config.get("rc_registration", {}))
+ self.rc_registration_token_validity = RateLimitConfig(
+ config.get("rc_registration_token_validity", {}),
+ defaults={"per_second": 0.1, "burst_count": 5},
+ )
+
rc_login_config = config.get("rc_login", {})
self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {}))
self.rc_login_account = RateLimitConfig(rc_login_config.get("account", {}))
@@ -143,6 +148,8 @@ class RatelimitConfig(Config):
# is using
# - one for registration that ratelimits registration requests based on the
# client's IP address.
+ # - one for checking the validity of registration tokens that ratelimits
+ # requests based on the client's IP address.
# - one for login that ratelimits login requests based on the client's IP
# address.
# - one for login that ratelimits login requests based on the account the
@@ -171,6 +178,10 @@ class RatelimitConfig(Config):
# per_second: 0.17
# burst_count: 3
#
+ #rc_registration_token_validity:
+ # per_second: 0.1
+ # burst_count: 5
+ #
#rc_login:
# address:
# per_second: 0.17
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 0ad919b139..7cffdacfa5 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -33,6 +33,9 @@ class RegistrationConfig(Config):
self.registrations_require_3pid = config.get("registrations_require_3pid", [])
self.allowed_local_3pids = config.get("allowed_local_3pids", [])
self.enable_3pid_lookup = config.get("enable_3pid_lookup", True)
+ self.registration_requires_token = config.get(
+ "registration_requires_token", False
+ )
self.registration_shared_secret = config.get("registration_shared_secret")
self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
@@ -140,6 +143,9 @@ class RegistrationConfig(Config):
"mechanism by removing the `access_token_lifetime` option."
)
+ # The fallback template used for authenticating using a registration token
+ self.registration_token_template = self.read_template("registration_token.html")
+
# The success template used during fallback auth.
self.fallback_success_template = self.read_template("auth_success.html")
@@ -199,6 +205,15 @@ class RegistrationConfig(Config):
#
#enable_3pid_lookup: true
+ # Require users to submit a token during registration.
+ # Tokens can be managed using the admin API:
+ # https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/registration_tokens.html
+ # Note that `enable_registration` must be set to `true`.
+ # Disabling this option will not delete any tokens previously generated.
+ # Defaults to false. Uncomment the following to require tokens:
+ #
+ #registration_requires_token: true
+
# If set, allows registration of standard or admin accounts by anyone who
# has the shared secret, even if registration is otherwise disabled.
#
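
Clients can discover the new requirement through the registration flows, and MSC3231 also adds an endpoint for pre-validating a token before starting registration. A hedged sketch of that call (unstable prefix as of this release; the homeserver URL and token are hypothetical):

    import json
    from urllib.request import urlopen

    BASE = "https://matrix.example.org"  # hypothetical homeserver
    url = (
        f"{BASE}/_matrix/client/unstable/org.matrix.msc3231"
        f"/register/org.matrix.msc3231.registration_token/validity?token=abc123"
    )
    # resp = json.load(urlopen(url))  # -> {"valid": true} for a usable token
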
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 0dfb3a227a..7481f3bf5f 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -12,9 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import logging
import os
from collections import namedtuple
from typing import Dict, List
+from urllib.request import getproxies_environment # type: ignore
from synapse.config.server import DEFAULT_IP_RANGE_BLACKLIST, generate_ip_set
from synapse.python_dependencies import DependencyException, check_requirements
@@ -22,6 +24,8 @@ from synapse.util.module_loader import load_module
from ._base import Config, ConfigError
+logger = logging.getLogger(__name__)
+
DEFAULT_THUMBNAIL_SIZES = [
{"width": 32, "height": 32, "method": "crop"},
{"width": 96, "height": 96, "method": "crop"},
@@ -36,6 +40,9 @@ THUMBNAIL_SIZE_YAML = """\
# method: %(method)s
"""
+HTTP_PROXY_SET_WARNING = """\
+The Synapse config url_preview_ip_range_blacklist will be ignored because an HTTP(S) proxy is configured.
+
ThumbnailRequirement = namedtuple(
"ThumbnailRequirement", ["width", "height", "method", "media_type"]
)
@@ -180,12 +187,17 @@ class ContentRepositoryConfig(Config):
e.message # noqa: B306, DependencyException.message is a property
)
+ proxy_env = getproxies_environment()
if "url_preview_ip_range_blacklist" not in config:
- raise ConfigError(
- "For security, you must specify an explicit target IP address "
- "blacklist in url_preview_ip_range_blacklist for url previewing "
- "to work"
- )
+ if "http" not in proxy_env or "https" not in proxy_env:
+ raise ConfigError(
+ "For security, you must specify an explicit target IP address "
+ "blacklist in url_preview_ip_range_blacklist for url previewing "
+ "to work"
+ )
+ else:
+ if "http" in proxy_env or "https" in proxy_env:
+                logger.warning(HTTP_PROXY_SET_WARNING)
# we always blacklist '0.0.0.0' and '::', which are supposed to be
# unroutable addresses.
@@ -292,6 +304,8 @@ class ContentRepositoryConfig(Config):
# This must be specified if url_preview_enabled is set. It is recommended that
# you uncomment the following list as a starting point.
#
+ # Note: The value is ignored when an HTTP proxy is in use
+ #
#url_preview_ip_range_blacklist:
%(ip_range_blacklist)s
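
`getproxies_environment()` reads the HTTP_PROXY/HTTPS_PROXY environment variables (upper- or lowercase) and returns a dict keyed by scheme, which is what the check above inspects. A quick sketch:

    from urllib.request import getproxies_environment

    # e.g. {"http": "http://proxy:3128", "https": "http://proxy:3128"}
    proxies = getproxies_environment()
    if "http" in proxies or "https" in proxies:
        # URL previews are fetched via the proxy, so the IP range blacklist
        # is not consulted (hence a warning rather than a hard error).
        print("proxying previews via", proxies)
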
diff --git a/synapse/config/server.py b/synapse/config/server.py
index b9e0c0b300..7b9109a592 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -25,11 +25,14 @@ import attr
import yaml
from netaddr import AddrFormatError, IPNetwork, IPSet
+from twisted.conch.ssh.keys import Key
+
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_server_name
from ._base import Config, ConfigError
+from ._util import validate_config
logger = logging.getLogger(__name__)
@@ -216,6 +219,16 @@ class ListenerConfig:
http_options = attr.ib(type=Optional[HttpListenerConfig], default=None)
+@attr.s(frozen=True)
+class ManholeConfig:
+ """Object describing the configuration of the manhole"""
+
+ username = attr.ib(type=str, validator=attr.validators.instance_of(str))
+ password = attr.ib(type=str, validator=attr.validators.instance_of(str))
+ priv_key = attr.ib(type=Optional[Key])
+ pub_key = attr.ib(type=Optional[Key])
+
+
class ServerConfig(Config):
section = "server"
@@ -248,6 +261,7 @@ class ServerConfig(Config):
self.use_presence = config.get("use_presence", True)
# Custom presence router module
+ # This is the legacy way of configuring it (the config should now be put in the modules section)
self.presence_router_module_class = None
self.presence_router_config = None
presence_router_config = presence_config.get("presence_router")
@@ -648,6 +662,41 @@ class ServerConfig(Config):
)
)
+ manhole_settings = config.get("manhole_settings") or {}
+ validate_config(
+ _MANHOLE_SETTINGS_SCHEMA, manhole_settings, ("manhole_settings",)
+ )
+
+ manhole_username = manhole_settings.get("username", "matrix")
+ manhole_password = manhole_settings.get("password", "rabbithole")
+ manhole_priv_key_path = manhole_settings.get("ssh_priv_key_path")
+ manhole_pub_key_path = manhole_settings.get("ssh_pub_key_path")
+
+ manhole_priv_key = None
+ if manhole_priv_key_path is not None:
+ try:
+ manhole_priv_key = Key.fromFile(manhole_priv_key_path)
+ except Exception as e:
+ raise ConfigError(
+ f"Failed to read manhole private key file {manhole_priv_key_path}"
+ ) from e
+
+ manhole_pub_key = None
+ if manhole_pub_key_path is not None:
+ try:
+ manhole_pub_key = Key.fromFile(manhole_pub_key_path)
+ except Exception as e:
+ raise ConfigError(
+ f"Failed to read manhole public key file {manhole_pub_key_path}"
+ ) from e
+
+ self.manhole_settings = ManholeConfig(
+ username=manhole_username,
+ password=manhole_password,
+ priv_key=manhole_priv_key,
+ pub_key=manhole_pub_key,
+ )
+
metrics_port = config.get("metrics_port")
if metrics_port:
logger.warning(METRICS_PORT_WARNING)
@@ -710,11 +759,29 @@ class ServerConfig(Config):
# Turn the list into a set to improve lookup speed.
self.next_link_domain_whitelist = set(next_link_domain_whitelist)
+ templates_config = config.get("templates") or {}
+ if not isinstance(templates_config, dict):
+ raise ConfigError("The 'templates' section must be a dictionary")
+
+ self.custom_template_directory: Optional[str] = templates_config.get(
+ "custom_template_directory"
+ )
+ if self.custom_template_directory is not None and not isinstance(
+ self.custom_template_directory, str
+ ):
+ raise ConfigError("'custom_template_directory' must be a string")
+
def has_tls_listener(self) -> bool:
return any(listener.tls for listener in self.listeners)
def generate_config_section(
- self, server_name, data_dir_path, open_private_ports, listeners, **kwargs
+ self,
+ server_name,
+ data_dir_path,
+ open_private_ports,
+ listeners,
+ config_dir_path,
+ **kwargs,
):
ip_range_blacklist = "\n".join(
" # - '%s'" % ip for ip in DEFAULT_IP_RANGE_BLACKLIST
@@ -858,20 +925,6 @@ class ServerConfig(Config):
#
#enabled: false
- # Presence routers are third-party modules that can specify additional logic
- # to where presence updates from users are routed.
- #
- presence_router:
- # The custom module's class. Uncomment to use a custom presence router module.
- #
- #module: "my_custom_router.PresenceRouter"
-
- # Configuration options of the custom module. Refer to your module's
- # documentation for available options.
- #
- #config:
- # example_option: 'something'
-
# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API. Defaults to
# 'false'. Note that profile data is also available via the federation
@@ -960,6 +1013,8 @@ class ServerConfig(Config):
#
# This option replaces federation_ip_range_blacklist in Synapse v1.25.0.
#
+ # Note: The value is ignored when an HTTP proxy is in use
+ #
#ip_range_blacklist:
%(ip_range_blacklist)s
@@ -1067,6 +1122,24 @@ class ServerConfig(Config):
# bind_addresses: ['::1', '127.0.0.1']
# type: manhole
+ # Connection settings for the manhole
+ #
+ manhole_settings:
+ # The username for the manhole. This defaults to 'matrix'.
+ #
+ #username: manhole
+
+ # The password for the manhole. This defaults to 'rabbithole'.
+ #
+ #password: mypassword
+
+ # The private and public SSH key pair used to encrypt the manhole traffic.
+ # If these are left unset, then hardcoded and non-secret keys are used,
+ # which could allow traffic to be intercepted if sent over a public network.
+ #
+ #ssh_priv_key_path: %(config_dir_path)s/id_rsa
+ #ssh_pub_key_path: %(config_dir_path)s/id_rsa.pub
+
# Forward extremities can build up in a room due to networking delays between
# homeservers. Once this happens in a large room, calculation of the state of
# that room can become quite expensive. To mitigate this, once the number of
@@ -1282,6 +1355,19 @@ class ServerConfig(Config):
# all domains.
#
#next_link_domain_whitelist: ["matrix.org"]
+
+ # Templates to use when generating email or HTML page contents.
+ #
+ templates:
+ # Directory in which Synapse will try to find template files to use to generate
+ # email or HTML page contents.
+ # If not set, or a file is not found within the template directory, a default
+ # template from within the Synapse package will be used.
+ #
+ # See https://matrix-org.github.io/synapse/latest/templates.html for more
+ # information about using custom templates.
+ #
+ #custom_template_directory: /path/to/custom/templates/
"""
% locals()
)
@@ -1422,3 +1508,14 @@ def _warn_if_webclient_configured(listeners: Iterable[ListenerConfig]) -> None:
if name == "webclient":
logger.warning(NO_MORE_WEB_CLIENT_WARNING)
return
+
+
+_MANHOLE_SETTINGS_SCHEMA = {
+ "type": "object",
+ "properties": {
+ "username": {"type": "string"},
+ "password": {"type": "string"},
+ "ssh_priv_key_path": {"type": "string"},
+ "ssh_pub_key_path": {"type": "string"},
+ },
+}
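
The manhole key pair is loaded with Twisted's `Key.fromFile`, so any OpenSSH-format key works. A short sketch (paths are hypothetical; a pair can be generated with `ssh-keygen`):

    from twisted.conch.ssh.keys import Key

    priv_key = Key.fromFile("/etc/synapse/id_rsa")     # hypothetical path
    pub_key = Key.fromFile("/etc/synapse/id_rsa.pub")  # hypothetical path
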
diff --git a/synapse/config/sso.py b/synapse/config/sso.py
index d0f04cf8e6..524a7ff3aa 100644
--- a/synapse/config/sso.py
+++ b/synapse/config/sso.py
@@ -11,12 +11,23 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import logging
from typing import Any, Dict, Optional
import attr
from ._base import Config
+logger = logging.getLogger(__name__)
+
+LEGACY_TEMPLATE_DIR_WARNING = """
+This server's configuration file is using the deprecated 'template_dir' setting in the
+'sso' section. Support for this setting has been deprecated and will be removed in a
+future version of Synapse. Server admins should instead use the new
+'custom_template_directory' setting documented here:
+https://matrix-org.github.io/synapse/latest/templates.html
+---------------------------------------------------------------------------------------"""
+
@attr.s(frozen=True)
class SsoAttributeRequirement:
@@ -43,8 +54,15 @@ class SSOConfig(Config):
# The sso-specific template_dir
self.sso_template_dir = sso_config.get("template_dir")
+ if self.sso_template_dir is not None:
+ logger.warning(LEGACY_TEMPLATE_DIR_WARNING)
# Read templates from disk
+ custom_template_directories = (
+ self.root.server.custom_template_directory,
+ self.sso_template_dir,
+ )
+
(
self.sso_login_idp_picker_template,
self.sso_redirect_confirm_template,
@@ -63,7 +81,7 @@ class SSOConfig(Config):
"sso_auth_success.html",
"sso_auth_bad_user.html",
],
- self.sso_template_dir,
+ (td for td in custom_template_directories if td),
)
# These templates have no placeholders, so render them here
@@ -94,6 +112,9 @@ class SSOConfig(Config):
# Additional settings to use with single-sign on systems such as OpenID Connect,
# SAML2 and CAS.
#
+ # Server admins can configure custom templates for pages related to SSO. See
+ # https://matrix-org.github.io/synapse/latest/templates.html for more information.
+ #
sso:
# A list of client URLs which are whitelisted so that the user does not
# have to confirm giving access to their account to the URL. Any client
@@ -125,167 +146,4 @@ class SSOConfig(Config):
# information when first signing in. Defaults to false.
#
#update_profile_information: true
-
- # Directory in which Synapse will try to find the template files below.
- # If not set, or the files named below are not found within the template
- # directory, default templates from within the Synapse package will be used.
- #
- # Synapse will look for the following templates in this directory:
- #
- # * HTML page to prompt the user to choose an Identity Provider during
- # login: 'sso_login_idp_picker.html'.
- #
- # This is only used if multiple SSO Identity Providers are configured.
- #
- # When rendering, this template is given the following variables:
- # * redirect_url: the URL that the user will be redirected to after
- # login.
- #
- # * server_name: the homeserver's name.
- #
- # * providers: a list of available Identity Providers. Each element is
- # an object with the following attributes:
- #
- # * idp_id: unique identifier for the IdP
- # * idp_name: user-facing name for the IdP
- # * idp_icon: if specified in the IdP config, an MXC URI for an icon
- # for the IdP
- # * idp_brand: if specified in the IdP config, a textual identifier
- # for the brand of the IdP
- #
- # The rendered HTML page should contain a form which submits its results
- # back as a GET request, with the following query parameters:
- #
- # * redirectUrl: the client redirect URI (ie, the `redirect_url` passed
- # to the template)
- #
- # * idp: the 'idp_id' of the chosen IDP.
- #
- # * HTML page to prompt new users to enter a userid and confirm other
- # details: 'sso_auth_account_details.html'. This is only shown if the
- # SSO implementation (with any user_mapping_provider) does not return
- # a localpart.
- #
- # When rendering, this template is given the following variables:
- #
- # * server_name: the homeserver's name.
- #
- # * idp: details of the SSO Identity Provider that the user logged in
- # with: an object with the following attributes:
- #
- # * idp_id: unique identifier for the IdP
- # * idp_name: user-facing name for the IdP
- # * idp_icon: if specified in the IdP config, an MXC URI for an icon
- # for the IdP
- # * idp_brand: if specified in the IdP config, a textual identifier
- # for the brand of the IdP
- #
- # * user_attributes: an object containing details about the user that
- # we received from the IdP. May have the following attributes:
- #
- # * display_name: the user's display_name
- # * emails: a list of email addresses
- #
- # The template should render a form which submits the following fields:
- #
- # * username: the localpart of the user's chosen user id
- #
- # * HTML page allowing the user to consent to the server's terms and
- # conditions. This is only shown for new users, and only if
- # `user_consent.require_at_registration` is set.
- #
- # When rendering, this template is given the following variables:
- #
- # * server_name: the homeserver's name.
- #
- # * user_id: the user's matrix proposed ID.
- #
- # * user_profile.display_name: the user's proposed display name, if any.
- #
- # * consent_version: the version of the terms that the user will be
- # shown
- #
- # * terms_url: a link to the page showing the terms.
- #
- # The template should render a form which submits the following fields:
- #
- # * accepted_version: the version of the terms accepted by the user
- # (ie, 'consent_version' from the input variables).
- #
- # * HTML page for a confirmation step before redirecting back to the client
- # with the login token: 'sso_redirect_confirm.html'.
- #
- # When rendering, this template is given the following variables:
- #
- # * redirect_url: the URL the user is about to be redirected to.
- #
- # * display_url: the same as `redirect_url`, but with the query
- # parameters stripped. The intention is to have a
- # human-readable URL to show to users, not to use it as
- # the final address to redirect to.
- #
- # * server_name: the homeserver's name.
- #
- # * new_user: a boolean indicating whether this is the user's first time
- # logging in.
- #
- # * user_id: the user's matrix ID.
- #
- # * user_profile.avatar_url: an MXC URI for the user's avatar, if any.
- # None if the user has not set an avatar.
- #
- # * user_profile.display_name: the user's display name. None if the user
- # has not set a display name.
- #
- # * HTML page which notifies the user that they are authenticating to confirm
- # an operation on their account during the user interactive authentication
- # process: 'sso_auth_confirm.html'.
- #
- # When rendering, this template is given the following variables:
- # * redirect_url: the URL the user is about to be redirected to.
- #
- # * description: the operation which the user is being asked to confirm
- #
- # * idp: details of the Identity Provider that we will use to confirm
- # the user's identity: an object with the following attributes:
- #
- # * idp_id: unique identifier for the IdP
- # * idp_name: user-facing name for the IdP
- # * idp_icon: if specified in the IdP config, an MXC URI for an icon
- # for the IdP
- # * idp_brand: if specified in the IdP config, a textual identifier
- # for the brand of the IdP
- #
- # * HTML page shown after a successful user interactive authentication session:
- # 'sso_auth_success.html'.
- #
- # Note that this page must include the JavaScript which notifies of a successful authentication
- # (see https://matrix.org/docs/spec/client_server/r0.6.0#fallback).
- #
- # This template has no additional variables.
- #
- # * HTML page shown after a user-interactive authentication session which
- # does not map correctly onto the expected user: 'sso_auth_bad_user.html'.
- #
- # When rendering, this template is given the following variables:
- # * server_name: the homeserver's name.
- # * user_id_to_verify: the MXID of the user that we are trying to
- # validate.
- #
- # * HTML page shown during single sign-on if a deactivated user (according to Synapse's database)
- # attempts to login: 'sso_account_deactivated.html'.
- #
- # This template has no additional variables.
- #
- # * HTML page to display to users if something goes wrong during the
- # OpenID Connect authentication process: 'sso_error.html'.
- #
- # When rendering, this template is given two variables:
- # * error: the technical name of the error
- # * error_description: a human-readable message for the error
- #
- # You can see the default templates at:
- # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
- #
- #template_dir: "res/templates"
"""
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 4c92e9a2d4..b63a1afe93 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -216,21 +216,18 @@ def check(
def _check_size_limits(event: EventBase) -> None:
- def too_big(field):
- raise EventSizeError("%s too large" % (field,))
-
if len(event.user_id) > 255:
- too_big("user_id")
+ raise EventSizeError("'user_id' too large")
if len(event.room_id) > 255:
- too_big("room_id")
+ raise EventSizeError("'room_id' too large")
if event.is_state() and len(event.state_key) > 255:
- too_big("state_key")
+ raise EventSizeError("'state_key' too large")
if len(event.type) > 255:
- too_big("type")
+ raise EventSizeError("'type' too large")
if len(event.event_id) > 255:
- too_big("event_id")
+ raise EventSizeError("'event_id' too large")
if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE:
- too_big("event")
+ raise EventSizeError("event too large")
def _can_federate(event: EventBase, auth_events: StateMap[EventBase]) -> bool:
@@ -370,10 +367,7 @@ def _is_membership_change_allowed(
raise AuthError(403, "You are banned from this room")
elif join_rule == JoinRules.PUBLIC:
pass
- elif (
- room_version.msc3083_join_rules
- and join_rule == JoinRules.MSC3083_RESTRICTED
- ):
+ elif room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED:
# This is the same as public, but the event must contain a reference
# to the server who authorised the join. If the event does not contain
# the proper content it is rejected.
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 0298af4c02..a730c1719a 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -396,10 +396,11 @@ class FrozenEvent(EventBase):
return self.__repr__()
def __repr__(self):
- return "<FrozenEvent event_id=%r, type=%r, state_key=%r>" % (
+ return "<FrozenEvent event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
self.get("event_id", None),
self.get("type", None),
self.get("state_key", None),
+ self.internal_metadata.is_outlier(),
)
diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py
index 6c37c8a7a4..eb4556cdc1 100644
--- a/synapse/events/presence_router.py
+++ b/synapse/events/presence_router.py
@@ -11,45 +11,115 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from typing import TYPE_CHECKING, Dict, Iterable, Set, Union
+import logging
+from typing import (
+ TYPE_CHECKING,
+ Awaitable,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Set,
+ Union,
+)
from synapse.api.presence import UserPresenceState
+from synapse.util.async_helpers import maybe_awaitable
if TYPE_CHECKING:
from synapse.server import HomeServer
+GET_USERS_FOR_STATES_CALLBACK = Callable[
+ [Iterable[UserPresenceState]], Awaitable[Dict[str, Set[UserPresenceState]]]
+]
+GET_INTERESTED_USERS_CALLBACK = Callable[
+ [str], Awaitable[Union[Set[str], "PresenceRouter.ALL_USERS"]]
+]
+
+logger = logging.getLogger(__name__)
+
+
+def load_legacy_presence_router(hs: "HomeServer"):
+ """Wrapper that loads a presence router module configured using the old
+    configuration, and registers the hooks it implements.
+ """
+
+ if hs.config.presence_router_module_class is None:
+ return
+
+ module = hs.config.presence_router_module_class
+ config = hs.config.presence_router_config
+ api = hs.get_module_api()
+
+ presence_router = module(config=config, module_api=api)
+
+    # The known hooks. If a module implements a method whose name appears in this set,
+ # we'll want to register it.
+ presence_router_methods = {
+ "get_users_for_states",
+ "get_interested_users",
+ }
+
+ # All methods that the module provides should be async, but this wasn't enforced
+ # in the old module system, so we wrap them if needed
+ def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
+ # f might be None if the callback isn't implemented by the module. In this
+ # case we don't want to register a callback at all so we return None.
+ if f is None:
+ return None
+
+ def run(*args, **kwargs):
+ # mypy doesn't do well across function boundaries so we need to tell it
+ # f is definitely not None.
+ assert f is not None
+
+ return maybe_awaitable(f(*args, **kwargs))
+
+ return run
+
+ # Register the hooks through the module API.
+ hooks = {
+ hook: async_wrapper(getattr(presence_router, hook, None))
+ for hook in presence_router_methods
+ }
+
+ api.register_presence_router_callbacks(**hooks)
+
class PresenceRouter:
"""
A module that the homeserver will call upon to help route user presence updates to
- additional destinations. If a custom presence router is configured, calls will be
- passed to that instead.
+ additional destinations.
"""
ALL_USERS = "ALL"
def __init__(self, hs: "HomeServer"):
- self.custom_presence_router = None
+ # Initially there are no callbacks
+ self._get_users_for_states_callbacks: List[GET_USERS_FOR_STATES_CALLBACK] = []
+ self._get_interested_users_callbacks: List[GET_INTERESTED_USERS_CALLBACK] = []
- # Check whether a custom presence router module has been configured
- if hs.config.presence_router_module_class:
- # Initialise the module
- self.custom_presence_router = hs.config.presence_router_module_class(
- config=hs.config.presence_router_config, module_api=hs.get_module_api()
+ def register_presence_router_callbacks(
+ self,
+ get_users_for_states: Optional[GET_USERS_FOR_STATES_CALLBACK] = None,
+ get_interested_users: Optional[GET_INTERESTED_USERS_CALLBACK] = None,
+ ):
+ # PresenceRouter modules are required to implement both of these methods
+        # or neither of them, as they are assumed to act in a complementary manner
+ paired_methods = [get_users_for_states, get_interested_users]
+ if paired_methods.count(None) == 1:
+ raise RuntimeError(
+ "PresenceRouter modules must register neither or both of the paired callbacks: "
+ "[get_users_for_states, get_interested_users]"
)
- # Ensure the module has implemented the required methods
- required_methods = ["get_users_for_states", "get_interested_users"]
- for method_name in required_methods:
- if not hasattr(self.custom_presence_router, method_name):
- raise Exception(
- "PresenceRouter module '%s' must implement all required methods: %s"
- % (
- hs.config.presence_router_module_class.__name__,
- ", ".join(required_methods),
- )
- )
+ # Append the methods provided to the lists of callbacks
+ if get_users_for_states is not None:
+ self._get_users_for_states_callbacks.append(get_users_for_states)
+
+ if get_interested_users is not None:
+ self._get_interested_users_callbacks.append(get_interested_users)
async def get_users_for_states(
self,
@@ -66,14 +136,40 @@ class PresenceRouter:
A dictionary of user_id -> set of UserPresenceState, indicating which
presence updates each user should receive.
"""
- if self.custom_presence_router is not None:
- # Ask the custom module
- return await self.custom_presence_router.get_users_for_states(
- state_updates=state_updates
- )
- # Don't include any extra destinations for presence updates
- return {}
+ # Bail out early if we don't have any callbacks to run.
+ if len(self._get_users_for_states_callbacks) == 0:
+ # Don't include any extra destinations for presence updates
+ return {}
+
+ users_for_states = {}
+ # run all the callbacks for get_users_for_states and combine the results
+ for callback in self._get_users_for_states_callbacks:
+ try:
+ result = await callback(state_updates)
+ except Exception as e:
+ logger.warning("Failed to run module API callback %s: %s", callback, e)
+ continue
+
+ if not isinstance(result, Dict):
+ logger.warning(
+ "Wrong type returned by module API callback %s: %s, expected Dict",
+ callback,
+ result,
+ )
+ continue
+
+ for key, new_entries in result.items():
+ if not isinstance(new_entries, Set):
+ logger.warning(
+ "Wrong type returned by module API callback %s: %s, expected Set",
+ callback,
+ new_entries,
+ )
+ break
+ users_for_states.setdefault(key, set()).update(new_entries)
+
+ return users_for_states
async def get_interested_users(self, user_id: str) -> Union[Set[str], ALL_USERS]:
"""
@@ -92,12 +188,36 @@ class PresenceRouter:
A set of user IDs to return presence updates for, or ALL_USERS to return all
known updates.
"""
- if self.custom_presence_router is not None:
- # Ask the custom module for interested users
- return await self.custom_presence_router.get_interested_users(
- user_id=user_id
- )
- # A custom presence router is not defined.
- # Don't report any additional interested users
- return set()
+ # Bail out early if we don't have any callbacks to run.
+ if len(self._get_interested_users_callbacks) == 0:
+ # Don't report any additional interested users
+ return set()
+
+ interested_users = set()
+ # run all the callbacks for get_interested_users and combine the results
+ for callback in self._get_interested_users_callbacks:
+ try:
+ result = await callback(user_id)
+ except Exception as e:
+ logger.warning("Failed to run module API callback %s: %s", callback, e)
+ continue
+
+ # If one of the callbacks returns ALL_USERS then we can stop calling all
+ # of the other callbacks, since the set of interested_users is already as
+ # large as it can possibly be
+ if result == PresenceRouter.ALL_USERS:
+ return PresenceRouter.ALL_USERS
+
+ if not isinstance(result, Set):
+ logger.warning(
+ "Wrong type returned by module API callback %s: %s, expected set",
+ callback,
+ result,
+ )
+ continue
+
+ # Add the new interested users to the set
+ interested_users.update(result)
+
+ return interested_users
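
For context, a module written against this new callback-based API might look roughly like the sketch below. The module class and the user IDs are hypothetical; register_presence_router_callbacks and the two callback signatures are those introduced above.

    from typing import Dict, Iterable, Set, Union

    from synapse.api.presence import UserPresenceState
    from synapse.module_api import ModuleApi

    class MonitorPresenceRouter:
        def __init__(self, config: dict, api: ModuleApi):
            # The callbacks are paired: registering exactly one of the two
            # raises a RuntimeError (see register_presence_router_callbacks).
            api.register_presence_router_callbacks(
                get_users_for_states=self.get_users_for_states,
                get_interested_users=self.get_interested_users,
            )

        async def get_users_for_states(
            self, state_updates: Iterable[UserPresenceState]
        ) -> Dict[str, Set[UserPresenceState]]:
            # Route a copy of every presence update to a hypothetical
            # monitoring user.
            return {"@monitor:example.com": set(state_updates)}

        async def get_interested_users(self, user_id: str) -> Union[Set[str], str]:
            # The monitoring user receives updates for all users; "ALL" is
            # the value of PresenceRouter.ALL_USERS.
            if user_id == "@monitor:example.com":
                return "ALL"
            return set()
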
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index a0c07f62f4..fb22337e27 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -17,7 +17,7 @@ from typing import Any, Mapping, Union
from frozendict import frozendict
-from synapse.api.constants import EventTypes, RelationTypes
+from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.util.async_helpers import yieldable_gather_results
@@ -32,6 +32,9 @@ from . import EventBase
# the literal fields "foo\" and "bar" but will instead be treated as "foo\\.bar"
SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")
+CANONICALJSON_MAX_INT = (2 ** 53) - 1
+CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT
+
def prune_event(event: EventBase) -> EventBase:
"""Returns a pruned version of the given event, which removes all keys we
@@ -101,6 +104,8 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
if event_type == EventTypes.Member:
add_fields("membership")
+ if room_version.msc3375_redaction_rules:
+ add_fields("join_authorised_via_users_server")
elif event_type == EventTypes.Create:
# MSC2176 rules state that create events cannot be redacted.
if room_version.msc2176_redaction_rules:
@@ -135,6 +140,12 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
add_fields("history_visibility")
elif event_type == EventTypes.Redaction and room_version.msc2176_redaction_rules:
add_fields("redacts")
+ elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_INSERTION:
+ add_fields(EventContentFields.MSC2716_NEXT_CHUNK_ID)
+ elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_CHUNK:
+ add_fields(EventContentFields.MSC2716_CHUNK_ID)
+ elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER:
+ add_fields(EventContentFields.MSC2716_MARKER_INSERTION)
allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}
@@ -499,7 +510,7 @@ def validate_canonicaljson(value: Any):
* NaN, Infinity, -Infinity
"""
if isinstance(value, int):
- if value <= -(2 ** 53) or 2 ** 53 <= value:
+ if value < CANONICALJSON_MIN_INT or CANONICALJSON_MAX_INT < value:
raise SynapseError(400, "JSON integer out of range", Codes.BAD_JSON)
elif isinstance(value, float):
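
The named constants pin down the accepted range: canonical JSON only permits integers exactly representable as IEEE 754 doubles, i.e. [-(2**53 - 1), 2**53 - 1]. A minimal, behaviourally identical sketch of the rewritten check:

    CANONICALJSON_MAX_INT = (2 ** 53) - 1
    CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT

    def in_canonicaljson_range(value: int) -> bool:
        # Mirrors the bounds check in validate_canonicaljson.
        return CANONICALJSON_MIN_INT <= value <= CANONICALJSON_MAX_INT

    assert in_canonicaljson_range(2 ** 53 - 1)
    assert not in_canonicaljson_range(2 ** 53)
    assert not in_canonicaljson_range(-(2 ** 53))
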
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index fa6987d7cb..33954b4f62 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -11,16 +11,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import collections.abc
from typing import Union
+import jsonschema
+
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import EventFormatVersions
from synapse.config.homeserver import HomeServerConfig
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
-from synapse.events.utils import validate_canonicaljson
+from synapse.events.utils import (
+ CANONICALJSON_MAX_INT,
+ CANONICALJSON_MIN_INT,
+ validate_canonicaljson,
+)
from synapse.federation.federation_server import server_matches_acl_event
from synapse.types import EventID, RoomID, UserID
@@ -87,6 +93,29 @@ class EventValidator:
400, "Can't create an ACL event that denies the local server"
)
+ if event.type == EventTypes.PowerLevels:
+ try:
+ jsonschema.validate(
+ instance=event.content,
+ schema=POWER_LEVELS_SCHEMA,
+ cls=plValidator,
+ )
+ except jsonschema.ValidationError as e:
+ if e.path:
+ # example: "users_default": '0' is not of type 'integer'
+ message = '"' + e.path[-1] + '": ' + e.message # noqa: B306
+ # jsonschema.ValidationError.message is a valid attribute
+ else:
+ # example: '0' is not of type 'integer'
+ message = e.message # noqa: B306
+ # jsonschema.ValidationError.message is a valid attribute
+
+ raise SynapseError(
+ code=400,
+ msg=message,
+ errcode=Codes.BAD_JSON,
+ )
+
def _validate_retention(self, event: EventBase):
"""Checks that an event that defines the retention policy for a room respects the
format enforced by the spec.
@@ -185,3 +214,47 @@ class EventValidator:
def _ensure_state_event(self, event):
if not event.is_state():
raise SynapseError(400, "'%s' must be state events" % (event.type,))
+
+
+POWER_LEVELS_SCHEMA = {
+ "type": "object",
+ "properties": {
+ "ban": {"$ref": "#/definitions/int"},
+ "events": {"$ref": "#/definitions/objectOfInts"},
+ "events_default": {"$ref": "#/definitions/int"},
+ "invite": {"$ref": "#/definitions/int"},
+ "kick": {"$ref": "#/definitions/int"},
+ "notifications": {"$ref": "#/definitions/objectOfInts"},
+ "redact": {"$ref": "#/definitions/int"},
+ "state_default": {"$ref": "#/definitions/int"},
+ "users": {"$ref": "#/definitions/objectOfInts"},
+ "users_default": {"$ref": "#/definitions/int"},
+ },
+ "definitions": {
+ "int": {
+ "type": "integer",
+ "minimum": CANONICALJSON_MIN_INT,
+ "maximum": CANONICALJSON_MAX_INT,
+ },
+ "objectOfInts": {
+ "type": "object",
+ "additionalProperties": {"$ref": "#/definitions/int"},
+ },
+ },
+}
+
+
+def _create_power_level_validator():
+ validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA)
+
+ # by default jsonschema does not consider a frozendict to be an object so
+ # we need to use a custom type checker
+ # https://python-jsonschema.readthedocs.io/en/stable/validate/?highlight=object#validating-with-additional-types
+ type_checker = validator.TYPE_CHECKER.redefine(
+ "object", lambda checker, thing: isinstance(thing, collections.abc.Mapping)
+ )
+
+ return jsonschema.validators.extend(validator, type_checker=type_checker)
+
+
+plValidator = _create_power_level_validator()
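
As a sketch of the effect (assuming POWER_LEVELS_SCHEMA and plValidator are importable from synapse.events.validator as defined above), a stringly-typed power level is now rejected with a per-field message rather than accepted:

    import jsonschema

    from synapse.events.validator import POWER_LEVELS_SCHEMA, plValidator

    good = {"ban": 50, "users": {"@alice:example.com": 100}}
    bad = {"users_default": "50"}  # a string, not an integer

    # Passes without raising.
    jsonschema.validate(instance=good, schema=POWER_LEVELS_SCHEMA, cls=plValidator)

    try:
        jsonschema.validate(instance=bad, schema=POWER_LEVELS_SCHEMA, cls=plValidator)
    except jsonschema.ValidationError as e:
        # e.path ends with "users_default", so the error surfaced to the
        # client (as M_BAD_JSON) reads:
        #   "users_default": '50' is not of type 'integer'
        print(e.message)
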
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index a837c18726..2dc1a2397d 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -43,6 +43,7 @@ from synapse.api.errors import (
Codes,
FederationDeniedError,
HttpResponseException,
+ RequestSendFailed,
SynapseError,
UnsupportedRoomVersionError,
)
@@ -111,6 +112,23 @@ class FederationClient(FederationBase):
reset_expiry_on_get=False,
)
+ # A cache for fetching the room hierarchy over federation.
+ #
+ # Some stale data over federation is OK, but must be refreshed
+ # periodically since the local server is in the room.
+ #
+ # It is a map of (room ID, suggested-only) -> the response of
+ # get_room_hierarchy.
+ self._get_room_hierarchy_cache: ExpiringCache[
+ Tuple[str, bool], Tuple[JsonDict, Sequence[JsonDict], Sequence[str]]
+ ] = ExpiringCache(
+ cache_name="get_room_hierarchy_cache",
+ clock=self._clock,
+ max_len=1000,
+ expiry_ms=5 * 60 * 1000,
+ reset_expiry_on_get=False,
+ )
+
def _clear_tried_cache(self):
"""Clear pdu_destination_tried cache"""
now = self._clock.time_msec()
@@ -559,7 +577,11 @@ class FederationClient(FederationBase):
try:
return await callback(destination)
- except InvalidResponseError as e:
+ except (
+ RequestSendFailed,
+ InvalidResponseError,
+ NotRetryingDestination,
+ ) as e:
logger.warning("Failed to %s via %s: %s", description, destination, e)
except UnsupportedRoomVersionError:
raise
@@ -1112,7 +1134,8 @@ class FederationClient(FederationBase):
The response from the remote server.
Raises:
- HttpResponseException: There was an exception returned from the remote server
+ HttpResponseException / RequestSendFailed: There was an exception
+ returned from the remote server
SynapseException: M_FORBIDDEN when the remote server has disallowed publicRoom
requests over federation
@@ -1293,8 +1316,145 @@ class FederationClient(FederationBase):
failover_on_unknown_endpoint=True,
)
+ async def get_room_hierarchy(
+ self,
+ destinations: Iterable[str],
+ room_id: str,
+ suggested_only: bool,
+ ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[str]]:
+ """
+ Call other servers to get a hierarchy of the given room.
+
+        Performs simple data validation and parsing of the response.
+
+ Args:
+ destinations: The remote servers. We will try them in turn, omitting any
+ that have been blacklisted.
+ room_id: ID of the space to be queried
+ suggested_only: If true, ask the remote server to only return children
+ with the "suggested" flag set
+
+ Returns:
+ A tuple of:
+ The room as a JSON dictionary.
+ A list of children rooms, as JSON dictionaries.
+ A list of inaccessible children room IDs.
+
+ Raises:
+ SynapseError if we were unable to get a valid summary from any of the
+ remote servers
+ """
+
+ cached_result = self._get_room_hierarchy_cache.get((room_id, suggested_only))
+ if cached_result:
+ return cached_result
+
+ async def send_request(
+ destination: str,
+ ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[str]]:
+ res = await self.transport_layer.get_room_hierarchy(
+ destination=destination,
+ room_id=room_id,
+ suggested_only=suggested_only,
+ )
+
+ room = res.get("room")
+ if not isinstance(room, dict):
+ raise InvalidResponseError("'room' must be a dict")
+
+ # Validate children_state of the room.
+ children_state = room.get("children_state", [])
+ if not isinstance(children_state, Sequence):
+ raise InvalidResponseError("'room.children_state' must be a list")
+ if any(not isinstance(e, dict) for e in children_state):
+ raise InvalidResponseError("Invalid event in 'children_state' list")
+ try:
+ [
+ FederationSpaceSummaryEventResult.from_json_dict(e)
+ for e in children_state
+ ]
+ except ValueError as e:
+ raise InvalidResponseError(str(e))
+
+ # Validate the children rooms.
+ children = res.get("children", [])
+ if not isinstance(children, Sequence):
+ raise InvalidResponseError("'children' must be a list")
+ if any(not isinstance(r, dict) for r in children):
+ raise InvalidResponseError("Invalid room in 'children' list")
+
+ # Validate the inaccessible children.
+ inaccessible_children = res.get("inaccessible_children", [])
+ if not isinstance(inaccessible_children, Sequence):
+ raise InvalidResponseError("'inaccessible_children' must be a list")
+ if any(not isinstance(r, str) for r in inaccessible_children):
+ raise InvalidResponseError(
+ "Invalid room ID in 'inaccessible_children' list"
+ )
+
+ return room, children, inaccessible_children
+
+ try:
+ result = await self._try_destination_list(
+ "fetch room hierarchy",
+ destinations,
+ send_request,
+ failover_on_unknown_endpoint=True,
+ )
+ except SynapseError as e:
+ # If an unexpected error occurred, re-raise it.
+ if e.code != 502:
+ raise
+
+ # Fallback to the old federation API and translate the results if
+ # no servers implement the new API.
+ #
+ # The algorithm below is a bit inefficient as it only attempts to
+ # parse information for the requested room, but the legacy API may
+ # return additional layers.
+ legacy_result = await self.get_space_summary(
+ destinations,
+ room_id,
+ suggested_only,
+ max_rooms_per_space=None,
+ exclude_rooms=[],
+ )
+
+ # Find the requested room in the response (and remove it).
+ for _i, room in enumerate(legacy_result.rooms):
+ if room.get("room_id") == room_id:
+ break
+ else:
+ # The requested room was not returned, nothing we can do.
+ raise
+ requested_room = legacy_result.rooms.pop(_i)
+
+ # Find any children events of the requested room.
+ children_events = []
+ children_room_ids = set()
+ for event in legacy_result.events:
+ if event.room_id == room_id:
+ children_events.append(event.data)
+ children_room_ids.add(event.state_key)
+ # And add them under the requested room.
+ requested_room["children_state"] = children_events
+
+ # Find the children rooms.
+ children = []
+ for room in legacy_result.rooms:
+ if room.get("room_id") in children_room_ids:
+ children.append(room)
+
+ # It isn't clear from the response whether some of the rooms are
+ # not accessible.
+ result = (requested_room, children, ())
+
+ # Cache the result to avoid fetching data over federation every time.
+ self._get_room_hierarchy_cache[(room_id, suggested_only)] = result
+ return result
+
+
-@attr.s(frozen=True, slots=True)
+@attr.s(frozen=True, slots=True, auto_attribs=True)
class FederationSpaceSummaryEventResult:
"""Represents a single event in the result of a successful get_space_summary call.
@@ -1303,12 +1463,13 @@ class FederationSpaceSummaryEventResult:
object attributes.
"""
- event_type = attr.ib(type=str)
- state_key = attr.ib(type=str)
- via = attr.ib(type=Sequence[str])
+ event_type: str
+ room_id: str
+ state_key: str
+ via: Sequence[str]
# the raw data, including the above keys
- data = attr.ib(type=JsonDict)
+ data: JsonDict
@classmethod
def from_json_dict(cls, d: JsonDict) -> "FederationSpaceSummaryEventResult":
@@ -1325,6 +1486,10 @@ class FederationSpaceSummaryEventResult:
if not isinstance(event_type, str):
raise ValueError("Invalid event: 'event_type' must be a str")
+ room_id = d.get("room_id")
+ if not isinstance(room_id, str):
+ raise ValueError("Invalid event: 'room_id' must be a str")
+
state_key = d.get("state_key")
if not isinstance(state_key, str):
raise ValueError("Invalid event: 'state_key' must be a str")
@@ -1339,15 +1504,15 @@ class FederationSpaceSummaryEventResult:
if any(not isinstance(v, str) for v in via):
raise ValueError("Invalid event: 'via' must be a list of strings")
- return cls(event_type, state_key, via, d)
+ return cls(event_type, room_id, state_key, via, d)
-@attr.s(frozen=True, slots=True)
+@attr.s(frozen=True, slots=True, auto_attribs=True)
class FederationSpaceSummaryResult:
"""Represents the data returned by a successful get_space_summary call."""
- rooms = attr.ib(type=Sequence[JsonDict])
- events = attr.ib(type=Sequence[FederationSpaceSummaryEventResult])
+ rooms: List[JsonDict]
+ events: Sequence[FederationSpaceSummaryEventResult]
@classmethod
def from_json_dict(cls, d: JsonDict) -> "FederationSpaceSummaryResult":
@@ -1360,7 +1525,7 @@ class FederationSpaceSummaryResult:
ValueError if d is not a valid /spaces/ response
"""
rooms = d.get("rooms")
- if not isinstance(rooms, Sequence):
+ if not isinstance(rooms, List):
raise ValueError("'rooms' must be a list")
if any(not isinstance(r, dict) for r in rooms):
raise ValueError("Invalid room in 'rooms' list")
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 2892a11d7d..214ee948fa 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -110,6 +110,7 @@ class FederationServer(FederationBase):
super().__init__(hs)
self.handler = hs.get_federation_handler()
+ self._federation_event_handler = hs.get_federation_event_handler()
self.state = hs.get_state_handler()
self._event_auth_handler = hs.get_event_auth_handler()
@@ -195,13 +196,17 @@ class FederationServer(FederationBase):
origin, room_id, versions, limit
)
- res = self._transaction_from_pdus(pdus).get_dict()
+ res = self._transaction_dict_from_pdus(pdus)
return 200, res
async def on_incoming_transaction(
- self, origin: str, transaction_data: JsonDict
- ) -> Tuple[int, Dict[str, Any]]:
+ self,
+ origin: str,
+ transaction_id: str,
+ destination: str,
+ transaction_data: JsonDict,
+ ) -> Tuple[int, JsonDict]:
# If we receive a transaction we should make sure that kick off handling
# any old events in the staging area.
if not self._started_handling_of_staged_events:
@@ -212,8 +217,14 @@ class FederationServer(FederationBase):
# accurate as possible.
request_time = self._clock.time_msec()
- transaction = Transaction(**transaction_data)
- transaction_id = transaction.transaction_id # type: ignore
+ transaction = Transaction(
+ transaction_id=transaction_id,
+ destination=destination,
+ origin=origin,
+ origin_server_ts=transaction_data.get("origin_server_ts"), # type: ignore
+ pdus=transaction_data.get("pdus"), # type: ignore
+ edus=transaction_data.get("edus"),
+ )
if not transaction_id:
raise Exception("Transaction missing transaction_id")
@@ -221,9 +232,7 @@ class FederationServer(FederationBase):
logger.debug("[%s] Got transaction", transaction_id)
# Reject malformed transactions early: reject if too many PDUs/EDUs
- if len(transaction.pdus) > 50 or ( # type: ignore
- hasattr(transaction, "edus") and len(transaction.edus) > 100 # type: ignore
- ):
+ if len(transaction.pdus) > 50 or len(transaction.edus) > 100:
logger.info("Transaction PDU or EDU count too large. Returning 400")
return 400, {}
@@ -263,7 +272,7 @@ class FederationServer(FederationBase):
# CRITICAL SECTION: the first thing we must do (before awaiting) is
# add an entry to _active_transactions.
assert origin not in self._active_transactions
- self._active_transactions[origin] = transaction.transaction_id # type: ignore
+ self._active_transactions[origin] = transaction.transaction_id
try:
result = await self._handle_incoming_transaction(
@@ -291,11 +300,11 @@ class FederationServer(FederationBase):
if response:
logger.debug(
"[%s] We've already responded to this request",
- transaction.transaction_id, # type: ignore
+ transaction.transaction_id,
)
return response
- logger.debug("[%s] Transaction is new", transaction.transaction_id) # type: ignore
+ logger.debug("[%s] Transaction is new", transaction.transaction_id)
# We process PDUs and EDUs in parallel. This is important as we don't
# want to block things like to device messages from reaching clients
@@ -334,7 +343,7 @@ class FederationServer(FederationBase):
report back to the sending server.
"""
- received_pdus_counter.inc(len(transaction.pdus)) # type: ignore
+ received_pdus_counter.inc(len(transaction.pdus))
origin_host, _ = parse_server_name(origin)
@@ -342,7 +351,7 @@ class FederationServer(FederationBase):
newest_pdu_ts = 0
- for p in transaction.pdus: # type: ignore
+ for p in transaction.pdus:
# FIXME (richardv): I don't think this works:
# https://github.com/matrix-org/synapse/issues/8429
if "unsigned" in p:
@@ -436,10 +445,10 @@ class FederationServer(FederationBase):
return pdu_results
- async def _handle_edus_in_txn(self, origin: str, transaction: Transaction):
+ async def _handle_edus_in_txn(self, origin: str, transaction: Transaction) -> None:
"""Process the EDUs in a received transaction."""
- async def _process_edu(edu_dict):
+ async def _process_edu(edu_dict: JsonDict) -> None:
received_edus_counter.inc()
edu = Edu(
@@ -452,7 +461,7 @@ class FederationServer(FederationBase):
await concurrently_execute(
_process_edu,
- getattr(transaction, "edus", []),
+ transaction.edus,
TRANSACTION_CONCURRENCY_LIMIT,
)
@@ -538,7 +547,7 @@ class FederationServer(FederationBase):
pdu = await self.handler.get_persisted_pdu(origin, event_id)
if pdu:
- return 200, self._transaction_from_pdus([pdu]).get_dict()
+ return 200, self._transaction_dict_from_pdus([pdu])
else:
return 404, ""
@@ -779,7 +788,9 @@ class FederationServer(FederationBase):
event = await self._check_sigs_and_hash(room_version, event)
- return await self.handler.on_send_membership_event(origin, event)
+ return await self._federation_event_handler.on_send_membership_event(
+ origin, event
+ )
async def on_event_auth(
self, origin: str, room_id: str, event_id: str
@@ -879,18 +890,20 @@ class FederationServer(FederationBase):
ts_now_ms = self._clock.time_msec()
return await self.store.get_user_id_for_open_id_token(token, ts_now_ms)
- def _transaction_from_pdus(self, pdu_list: List[EventBase]) -> Transaction:
+ def _transaction_dict_from_pdus(self, pdu_list: List[EventBase]) -> JsonDict:
"""Returns a new Transaction containing the given PDUs suitable for
transmission.
"""
time_now = self._clock.time_msec()
pdus = [p.get_pdu_json(time_now) for p in pdu_list]
return Transaction(
+ # Just need a dummy transaction ID and destination since it won't be used.
+ transaction_id="",
origin=self.server_name,
pdus=pdus,
origin_server_ts=int(time_now),
- destination=None,
- )
+ destination="",
+ ).get_dict()
async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None:
"""Process a PDU received in a federation /send/ transaction.
@@ -962,13 +975,18 @@ class FederationServer(FederationBase):
# the room, so instead of pulling the event out of the DB and parsing
# the event we just pull out the next event ID and check if that matches.
if latest_event is not None and latest_origin is not None:
- (
- next_origin,
- next_event_id,
- ) = await self.store.get_next_staged_event_id_for_room(room_id)
- if next_origin != latest_origin or next_event_id != latest_event.event_id:
+ result = await self.store.get_next_staged_event_id_for_room(room_id)
+ if result is None:
latest_origin = None
latest_event = None
+ else:
+ next_origin, next_event_id = result
+ if (
+ next_origin != latest_origin
+ or next_event_id != latest_event.event_id
+ ):
+ latest_origin = None
+ latest_event = None
if latest_origin is None or latest_event is None:
next = await self.store.get_next_staged_event_for_room(
@@ -988,10 +1006,9 @@ class FederationServer(FederationBase):
# has started processing).
while True:
async with lock:
+ logger.info("handling received PDU: %s", event)
try:
- await self.handler.on_receive_pdu(
- origin, event, sent_to_us_directly=True
- )
+ await self._federation_event_handler.on_receive_pdu(origin, event)
except FederationError as e:
# XXX: Ideally we'd inform the remote we failed to process
# the event, but we can't return an error in the transaction
@@ -1024,6 +1041,23 @@ class FederationServer(FederationBase):
origin, event = next
+ # Prune the event queue if it's getting large.
+ #
+ # We do this *after* handling the first event as the common case is
+            # that the queue is empty (or contains only this one event), and so there's
+ # no need to do this check.
+ pruned = await self.store.prune_staged_events_in_room(room_id, room_version)
+ if pruned:
+                # If we have pruned the queue, we need to refetch the next
+                # event to handle.
+ next = await self.store.get_next_staged_event_for_room(
+ room_id, room_version
+ )
+ if not next:
+ break
+
+ origin, event = next
+
lock = await self.store.try_acquire_lock(
_INBOUND_EVENT_HANDLING_LOCK_NAME, room_id
)
diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py
index 2f9c9bc2cd..4fead6ca29 100644
--- a/synapse/federation/persistence.py
+++ b/synapse/federation/persistence.py
@@ -45,7 +45,7 @@ class TransactionActions:
`None` if we have not previously responded to this transaction or a
2-tuple of `(int, dict)` representing the response code and response body.
"""
- transaction_id = transaction.transaction_id # type: ignore
+ transaction_id = transaction.transaction_id
if not transaction_id:
raise RuntimeError("Cannot persist a transaction with no transaction_id")
@@ -56,7 +56,7 @@ class TransactionActions:
self, origin: str, transaction: Transaction, code: int, response: JsonDict
) -> None:
"""Persist how we responded to a transaction."""
- transaction_id = transaction.transaction_id # type: ignore
+ transaction_id = transaction.transaction_id
if not transaction_id:
raise RuntimeError("Cannot persist a transaction with no transaction_id")
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
index 72a635830b..dc555cca0b 100644
--- a/synapse/federation/sender/transaction_manager.py
+++ b/synapse/federation/sender/transaction_manager.py
@@ -27,6 +27,7 @@ from synapse.logging.opentracing import (
tags,
whitelisted_homeserver,
)
+from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.metrics import measure_func
@@ -104,13 +105,13 @@ class TransactionManager:
len(edus),
)
- transaction = Transaction.create_new(
+ transaction = Transaction(
origin_server_ts=int(self.clock.time_msec()),
transaction_id=txn_id,
origin=self._server_name,
destination=destination,
- pdus=pdus,
- edus=edus,
+ pdus=[p.get_pdu_json() for p in pdus],
+ edus=[edu.get_dict() for edu in edus],
)
self._next_txn_id += 1
@@ -131,7 +132,7 @@ class TransactionManager:
# FIXME (richardv): I also believe it no longer works. We (now?) store
# "age_ts" in "unsigned" rather than at the top level. See
# https://github.com/matrix-org/synapse/issues/8429.
- def json_data_cb():
+ def json_data_cb() -> JsonDict:
data = transaction.get_dict()
now = int(self.clock.time_msec())
if "pdus" in data:
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 6a8d3ad4fe..8b247fe206 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -143,7 +143,7 @@ class TransportLayerClient:
"""Sends the given Transaction to its destination
Args:
- transaction (Transaction)
+ transaction
Returns:
Succeeds when we get a 2xx HTTP response. The result
@@ -1177,6 +1177,28 @@ class TransportLayerClient:
destination=destination, path=path, data=params
)
+ async def get_room_hierarchy(
+ self,
+ destination: str,
+ room_id: str,
+ suggested_only: bool,
+ ) -> JsonDict:
+ """
+ Args:
+ destination: The remote server
+ room_id: The room ID to ask about.
+ suggested_only: if True, only suggested rooms will be returned
+ """
+ path = _create_path(
+ FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc2946/hierarchy/%s", room_id
+ )
+
+ return await self.client.get_json(
+ destination=destination,
+ path=path,
+ args={"suggested_only": "true" if suggested_only else "false"},
+ )
+
def _create_path(federation_prefix: str, path: str, *args: str) -> str:
"""
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
deleted file mode 100644
index 5e059d6e09..0000000000
--- a/synapse/federation/transport/server.py
+++ /dev/null
@@ -1,2139 +0,0 @@
-# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
-# Copyright 2020 Sorunome
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import functools
-import logging
-import re
-from typing import (
- Container,
- Dict,
- List,
- Mapping,
- Optional,
- Sequence,
- Tuple,
- Type,
- Union,
-)
-
-from typing_extensions import Literal
-
-import synapse
-from synapse.api.constants import MAX_GROUP_CATEGORYID_LENGTH, MAX_GROUP_ROLEID_LENGTH
-from synapse.api.errors import Codes, FederationDeniedError, SynapseError
-from synapse.api.room_versions import RoomVersions
-from synapse.api.urls import (
- FEDERATION_UNSTABLE_PREFIX,
- FEDERATION_V1_PREFIX,
- FEDERATION_V2_PREFIX,
-)
-from synapse.handlers.groups_local import GroupsLocalHandler
-from synapse.http.server import HttpServer, JsonResource
-from synapse.http.servlet import (
- parse_boolean_from_args,
- parse_integer_from_args,
- parse_json_object_from_request,
- parse_string_from_args,
- parse_strings_from_args,
-)
-from synapse.logging import opentracing
-from synapse.logging.context import run_in_background
-from synapse.logging.opentracing import (
- SynapseTags,
- start_active_span,
- start_active_span_from_request,
- tags,
- whitelisted_homeserver,
-)
-from synapse.server import HomeServer
-from synapse.types import JsonDict, ThirdPartyInstanceID, get_domain_from_id
-from synapse.util.ratelimitutils import FederationRateLimiter
-from synapse.util.stringutils import parse_and_validate_server_name
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger(__name__)
-
-
-class TransportLayerServer(JsonResource):
- """Handles incoming federation HTTP requests"""
-
- def __init__(self, hs: HomeServer, servlet_groups: Optional[List[str]] = None):
- """Initialize the TransportLayerServer
-
- Will by default register all servlets. For custom behaviour, pass in
- a list of servlet_groups to register.
-
- Args:
- hs: homeserver
- servlet_groups: List of servlet groups to register.
- Defaults to ``DEFAULT_SERVLET_GROUPS``.
- """
- self.hs = hs
- self.clock = hs.get_clock()
- self.servlet_groups = servlet_groups
-
- super().__init__(hs, canonical_json=False)
-
- self.authenticator = Authenticator(hs)
- self.ratelimiter = hs.get_federation_ratelimiter()
-
- self.register_servlets()
-
- def register_servlets(self) -> None:
- register_servlets(
- self.hs,
- resource=self,
- ratelimiter=self.ratelimiter,
- authenticator=self.authenticator,
- servlet_groups=self.servlet_groups,
- )
-
-
-class AuthenticationError(SynapseError):
- """There was a problem authenticating the request"""
-
-
-class NoAuthenticationError(AuthenticationError):
- """The request had no authentication information"""
-
-
-class Authenticator:
- def __init__(self, hs: HomeServer):
- self._clock = hs.get_clock()
- self.keyring = hs.get_keyring()
- self.server_name = hs.hostname
- self.store = hs.get_datastore()
- self.federation_domain_whitelist = hs.config.federation_domain_whitelist
- self.notifier = hs.get_notifier()
-
- self.replication_client = None
- if hs.config.worker.worker_app:
- self.replication_client = hs.get_tcp_replication()
-
- # A method just so we can pass 'self' as the authenticator to the Servlets
- async def authenticate_request(self, request, content):
- now = self._clock.time_msec()
- json_request = {
- "method": request.method.decode("ascii"),
- "uri": request.uri.decode("ascii"),
- "destination": self.server_name,
- "signatures": {},
- }
-
- if content is not None:
- json_request["content"] = content
-
- origin = None
-
- auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
-
- if not auth_headers:
- raise NoAuthenticationError(
- 401, "Missing Authorization headers", Codes.UNAUTHORIZED
- )
-
- for auth in auth_headers:
- if auth.startswith(b"X-Matrix"):
- (origin, key, sig) = _parse_auth_header(auth)
- json_request["origin"] = origin
- json_request["signatures"].setdefault(origin, {})[key] = sig
-
- if (
- self.federation_domain_whitelist is not None
- and origin not in self.federation_domain_whitelist
- ):
- raise FederationDeniedError(origin)
-
- if origin is None or not json_request["signatures"]:
- raise NoAuthenticationError(
- 401, "Missing Authorization headers", Codes.UNAUTHORIZED
- )
-
- await self.keyring.verify_json_for_server(
- origin,
- json_request,
- now,
- )
-
- logger.debug("Request from %s", origin)
- request.requester = origin
-
- # If we get a valid signed request from the other side, its probably
- # alive
- retry_timings = await self.store.get_destination_retry_timings(origin)
- if retry_timings and retry_timings.retry_last_ts:
- run_in_background(self._reset_retry_timings, origin)
-
- return origin
-
- async def _reset_retry_timings(self, origin):
- try:
- logger.info("Marking origin %r as up", origin)
- await self.store.set_destination_retry_timings(origin, None, 0, 0)
-
- # Inform the relevant places that the remote server is back up.
- self.notifier.notify_remote_server_up(origin)
- if self.replication_client:
- # If we're on a worker we try and inform master about this. The
- # replication client doesn't hook into the notifier to avoid
- # infinite loops where we send a `REMOTE_SERVER_UP` command to
- # master, which then echoes it back to us which in turn pokes
- # the notifier.
- self.replication_client.send_remote_server_up(origin)
-
- except Exception:
- logger.exception("Error resetting retry timings on %s", origin)
-
-
-def _parse_auth_header(header_bytes):
- """Parse an X-Matrix auth header
-
- Args:
- header_bytes (bytes): header value
-
- Returns:
- Tuple[str, str, str]: origin, key id, signature.
-
- Raises:
- AuthenticationError if the header could not be parsed
- """
- try:
- header_str = header_bytes.decode("utf-8")
- params = header_str.split(" ")[1].split(",")
- param_dict = dict(kv.split("=") for kv in params)
-
- def strip_quotes(value):
- if value.startswith('"'):
- return value[1:-1]
- else:
- return value
-
- origin = strip_quotes(param_dict["origin"])
-
- # ensure that the origin is a valid server name
- parse_and_validate_server_name(origin)
-
- key = strip_quotes(param_dict["key"])
- sig = strip_quotes(param_dict["sig"])
- return origin, key, sig
- except Exception as e:
- logger.warning(
- "Error parsing auth header '%s': %s",
- header_bytes.decode("ascii", "replace"),
- e,
- )
- raise AuthenticationError(
- 400, "Malformed Authorization header", Codes.UNAUTHORIZED
- )
-
-
-class BaseFederationServlet:
- """Abstract base class for federation servlet classes.
-
- The servlet object should have a PATH attribute which takes the form of a regexp to
- match against the request path (excluding the /federation/v1 prefix).
-
- The servlet should also implement one or more of on_GET, on_POST, on_PUT, to match
- the appropriate HTTP method. These methods must be *asynchronous* and have the
- signature:
-
- on_<METHOD>(self, origin, content, query, **kwargs)
-
- With arguments:
-
- origin (unicode|None): The authenticated server_name of the calling server,
- unless REQUIRE_AUTH is set to False and authentication failed.
-
- content (unicode|None): decoded json body of the request. None if the
- request was a GET.
-
- query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
- (ie, '+' and '%xx' are decoded) but note that it is *not* utf8-decoded
- yet.
-
- **kwargs (dict[unicode, unicode]): the dict mapping keys to path
- components as specified in the path match regexp.
-
- Returns:
- Optional[Tuple[int, object]]: either (response code, response object) to
- return a JSON response, or None if the request has already been handled.
-
- Raises:
- SynapseError: to return an error code
-
- Exception: other exceptions will be caught, logged, and a 500 will be
- returned.
- """
-
- PATH = "" # Overridden in subclasses, the regex to match against the path.
-
- REQUIRE_AUTH = True
-
- PREFIX = FEDERATION_V1_PREFIX # Allows specifying the API version
-
- RATELIMIT = True # Whether to rate limit requests or not
-
- def __init__(
- self,
- hs: HomeServer,
- authenticator: Authenticator,
- ratelimiter: FederationRateLimiter,
- server_name: str,
- ):
- self.hs = hs
- self.authenticator = authenticator
- self.ratelimiter = ratelimiter
- self.server_name = server_name
-
- def _wrap(self, func):
- authenticator = self.authenticator
- ratelimiter = self.ratelimiter
-
- @functools.wraps(func)
- async def new_func(request, *args, **kwargs):
- """A callback which can be passed to HttpServer.RegisterPaths
-
- Args:
- request (twisted.web.http.Request):
- *args: unused?
- **kwargs (dict[unicode, unicode]): the dict mapping keys to path
- components as specified in the path match regexp.
-
- Returns:
- Tuple[int, object]|None: (response code, response object) as returned by
- the callback method. None if the request has already been handled.
- """
- content = None
- if request.method in [b"PUT", b"POST"]:
- # TODO: Handle other method types? other content types?
- content = parse_json_object_from_request(request)
-
- try:
- origin = await authenticator.authenticate_request(request, content)
- except NoAuthenticationError:
- origin = None
- if self.REQUIRE_AUTH:
- logger.warning(
- "authenticate_request failed: missing authentication"
- )
- raise
- except Exception as e:
- logger.warning("authenticate_request failed: %s", e)
- raise
-
- request_tags = {
- SynapseTags.REQUEST_ID: request.get_request_id(),
- tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
- tags.HTTP_METHOD: request.get_method(),
- tags.HTTP_URL: request.get_redacted_uri(),
- tags.PEER_HOST_IPV6: request.getClientIP(),
- "authenticated_entity": origin,
- "servlet_name": request.request_metrics.name,
- }
-
- # Only accept the span context if the origin is authenticated
- # and whitelisted
- if origin and whitelisted_homeserver(origin):
- scope = start_active_span_from_request(
- request, "incoming-federation-request", tags=request_tags
- )
- else:
- scope = start_active_span(
- "incoming-federation-request", tags=request_tags
- )
-
- with scope:
- opentracing.inject_response_headers(request.responseHeaders)
-
- if origin and self.RATELIMIT:
- with ratelimiter.ratelimit(origin) as d:
- await d
- if request._disconnected:
- logger.warning(
- "client disconnected before we started processing "
- "request"
- )
- return -1, None
- response = await func(
- origin, content, request.args, *args, **kwargs
- )
- else:
- response = await func(
- origin, content, request.args, *args, **kwargs
- )
-
- return response
-
- return new_func
-
- def register(self, server):
- pattern = re.compile("^" + self.PREFIX + self.PATH + "$")
-
- for method in ("GET", "PUT", "POST"):
- code = getattr(self, "on_%s" % (method), None)
- if code is None:
- continue
-
- server.register_paths(
- method,
- (pattern,),
- self._wrap(code),
- self.__class__.__name__,
- )
-
-
-class BaseFederationServerServlet(BaseFederationServlet):
- """Abstract base class for federation servlet classes which provides a federation server handler.
-
- See BaseFederationServlet for more information.
- """
-
- def __init__(
- self,
- hs: HomeServer,
- authenticator: Authenticator,
- ratelimiter: FederationRateLimiter,
- server_name: str,
- ):
- super().__init__(hs, authenticator, ratelimiter, server_name)
- self.handler = hs.get_federation_server()
-
-
-class FederationSendServlet(BaseFederationServerServlet):
- PATH = "/send/(?P<transaction_id>[^/]*)/?"
-
- # We ratelimit manually in the handler as we queue up the requests and we
- # don't want to fill up the ratelimiter with blocked requests.
- RATELIMIT = False
-
- # This is when someone is trying to send us a bunch of data.
- async def on_PUT(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- transaction_id: str,
- ) -> Tuple[int, JsonDict]:
- """Called on PUT /send/<transaction_id>/
-
- Args:
- transaction_id: The transaction_id associated with this request. This
- is *not* None.
-
- Returns:
- Tuple of `(code, response)`, where
- `response` is a python dict to be converted into JSON that is
- used as the response body.
- """
- # Parse the request
- try:
- transaction_data = content
-
- logger.debug("Decoded %s: %s", transaction_id, str(transaction_data))
-
- logger.info(
- "Received txn %s from %s. (PDUs: %d, EDUs: %d)",
- transaction_id,
- origin,
- len(transaction_data.get("pdus", [])),
- len(transaction_data.get("edus", [])),
- )
-
- # We should ideally be getting this from the security layer.
- # origin = body["origin"]
-
- # Add some extra data to the transaction dict that isn't included
- # in the request body.
- transaction_data.update(
- transaction_id=transaction_id, destination=self.server_name
- )
-
- except Exception as e:
- logger.exception(e)
- return 400, {"error": "Invalid transaction"}
-
- code, response = await self.handler.on_incoming_transaction(
- origin, transaction_data
- )
-
- return code, response
-
-
-class FederationEventServlet(BaseFederationServerServlet):
- PATH = "/event/(?P<event_id>[^/]*)/?"
-
- # This is when someone asks for a data item for a given server data_id pair.
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- event_id: str,
- ) -> Tuple[int, Union[JsonDict, str]]:
- return await self.handler.on_pdu_request(origin, event_id)
-
-
-class FederationStateV1Servlet(BaseFederationServerServlet):
- PATH = "/state/(?P<room_id>[^/]*)/?"
-
- # This is when someone asks for all data for a given room.
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- room_id: str,
- ) -> Tuple[int, JsonDict]:
- return await self.handler.on_room_state_request(
- origin,
- room_id,
- parse_string_from_args(query, "event_id", None, required=False),
- )
-
-
-class FederationStateIdsServlet(BaseFederationServerServlet):
- PATH = "/state_ids/(?P<room_id>[^/]*)/?"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- room_id: str,
- ) -> Tuple[int, JsonDict]:
- return await self.handler.on_state_ids_request(
- origin,
- room_id,
- parse_string_from_args(query, "event_id", None, required=True),
- )
-
-
-class FederationBackfillServlet(BaseFederationServerServlet):
- PATH = "/backfill/(?P<room_id>[^/]*)/?"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- room_id: str,
- ) -> Tuple[int, JsonDict]:
- versions = [x.decode("ascii") for x in query[b"v"]]
- limit = parse_integer_from_args(query, "limit", None)
-
- if not limit:
- return 400, {"error": "Did not include limit param"}
-
- return await self.handler.on_backfill_request(origin, room_id, versions, limit)
-
-
-class FederationQueryServlet(BaseFederationServerServlet):
- PATH = "/query/(?P<query_type>[^/]*)"
-
- # This is when we receive a server-server Query
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- query_type: str,
- ) -> Tuple[int, JsonDict]:
- args = {k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()}
- args["origin"] = origin
- return await self.handler.on_query_request(query_type, args)
-
-
-class FederationMakeJoinServlet(BaseFederationServerServlet):
- PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- room_id: str,
- user_id: str,
- ) -> Tuple[int, JsonDict]:
- """
- Args:
- origin: The authenticated server_name of the calling server
-
- content: (GETs don't have bodies)
-
- query: Query params from the request.
-
- **kwargs: the dict mapping keys to path components as specified in
- the path match regexp.
-
- Returns:
- Tuple of (response code, response object)
- """
- supported_versions = parse_strings_from_args(query, "ver", encoding="utf-8")
- if supported_versions is None:
- supported_versions = ["1"]
-
- result = await self.handler.on_make_join_request(
- origin, room_id, user_id, supported_versions=supported_versions
- )
- return 200, result
-
-
-class FederationMakeLeaveServlet(BaseFederationServerServlet):
- PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- room_id: str,
- user_id: str,
- ) -> Tuple[int, JsonDict]:
- result = await self.handler.on_make_leave_request(origin, room_id, user_id)
- return 200, result
-
-
-class FederationV1SendLeaveServlet(BaseFederationServerServlet):
- PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
-
- async def on_PUT(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- room_id: str,
- event_id: str,
- ) -> Tuple[int, Tuple[int, JsonDict]]:
- result = await self.handler.on_send_leave_request(origin, content, room_id)
- return 200, (200, result)
-
-
-class FederationV2SendLeaveServlet(BaseFederationServerServlet):
- PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
-
- PREFIX = FEDERATION_V2_PREFIX
-
- async def on_PUT(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- room_id: str,
- event_id: str,
- ) -> Tuple[int, JsonDict]:
- result = await self.handler.on_send_leave_request(origin, content, room_id)
- return 200, result
-
-
-class FederationMakeKnockServlet(BaseFederationServerServlet):
- PATH = "/make_knock/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- room_id: str,
- user_id: str,
- ) -> Tuple[int, JsonDict]:
- # Retrieve the room versions the remote homeserver claims to support
- supported_versions = parse_strings_from_args(
- query, "ver", required=True, encoding="utf-8"
- )
-
- result = await self.handler.on_make_knock_request(
- origin, room_id, user_id, supported_versions=supported_versions
- )
- return 200, result
-
-
-class FederationV1SendKnockServlet(BaseFederationServerServlet):
- PATH = "/send_knock/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
-
- async def on_PUT(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- room_id: str,
- event_id: str,
- ) -> Tuple[int, JsonDict]:
- result = await self.handler.on_send_knock_request(origin, content, room_id)
- return 200, result
-
-
-class FederationEventAuthServlet(BaseFederationServerServlet):
- PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- room_id: str,
- event_id: str,
- ) -> Tuple[int, JsonDict]:
- return await self.handler.on_event_auth(origin, room_id, event_id)
-
-
-class FederationV1SendJoinServlet(BaseFederationServerServlet):
- PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
-
- async def on_PUT(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- room_id: str,
- event_id: str,
- ) -> Tuple[int, Tuple[int, JsonDict]]:
- # TODO(paul): assert that event_id parsed from path actually
- # match those given in content
- result = await self.handler.on_send_join_request(origin, content, room_id)
- return 200, (200, result)
-
-
-class FederationV2SendJoinServlet(BaseFederationServerServlet):
- PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
-
- PREFIX = FEDERATION_V2_PREFIX
-
- async def on_PUT(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- room_id: str,
- event_id: str,
- ) -> Tuple[int, JsonDict]:
- # TODO(paul): assert that event_id parsed from path actually
- # match those given in content
- result = await self.handler.on_send_join_request(origin, content, room_id)
- return 200, result
-
-
-class FederationV1InviteServlet(BaseFederationServerServlet):
- PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
-
- async def on_PUT(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- room_id: str,
- event_id: str,
- ) -> Tuple[int, Tuple[int, JsonDict]]:
-        # We don't get a room version, so we have to assume it's either v1 or
-        # v2. This is "fine" as the only difference between V1 and V2 is the
-        # state resolution algorithm, and we don't use that for processing
-        # invites.
- result = await self.handler.on_invite_request(
- origin, content, room_version_id=RoomVersions.V1.identifier
- )
-
- # V1 federation API is defined to return a content of `[200, {...}]`
- # due to a historical bug.
- return 200, (200, result)
-
-
-class FederationV2InviteServlet(BaseFederationServerServlet):
- PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
-
- PREFIX = FEDERATION_V2_PREFIX
-
- async def on_PUT(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- room_id: str,
- event_id: str,
- ) -> Tuple[int, JsonDict]:
- # TODO(paul): assert that room_id/event_id parsed from path actually
- # match those given in content
-
- room_version = content["room_version"]
- event = content["event"]
- invite_room_state = content["invite_room_state"]
-
- # Synapse expects invite_room_state to be in unsigned, as it is in v1
- # API
-
- event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
-
- result = await self.handler.on_invite_request(
- origin, event, room_version_id=room_version
- )
- return 200, result
-
-
-class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
- PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
-
- async def on_PUT(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- room_id: str,
- ) -> Tuple[int, JsonDict]:
- await self.handler.on_exchange_third_party_invite_request(content)
- return 200, {}
-
-
-class FederationClientKeysQueryServlet(BaseFederationServerServlet):
- PATH = "/user/keys/query"
-
- async def on_POST(
- self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
- ) -> Tuple[int, JsonDict]:
- return await self.handler.on_query_client_keys(origin, content)
-
-
-class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
- PATH = "/user/devices/(?P<user_id>[^/]*)"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- user_id: str,
- ) -> Tuple[int, JsonDict]:
- return await self.handler.on_query_user_devices(origin, user_id)
-
-
-class FederationClientKeysClaimServlet(BaseFederationServerServlet):
- PATH = "/user/keys/claim"
-
- async def on_POST(
- self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
- ) -> Tuple[int, JsonDict]:
- response = await self.handler.on_claim_client_keys(origin, content)
- return 200, response
-
-
-class FederationGetMissingEventsServlet(BaseFederationServerServlet):
-    # TODO(paul): Why does this path alone end with an optional trailing slash ("/?")?
- PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- room_id: str,
- ) -> Tuple[int, JsonDict]:
- limit = int(content.get("limit", 10))
- earliest_events = content.get("earliest_events", [])
- latest_events = content.get("latest_events", [])
-
- result = await self.handler.on_get_missing_events(
- origin,
- room_id=room_id,
- earliest_events=earliest_events,
- latest_events=latest_events,
- limit=limit,
- )
-
- return 200, result
-
-
-class On3pidBindServlet(BaseFederationServerServlet):
- PATH = "/3pid/onbind"
-
- REQUIRE_AUTH = False
-
- async def on_POST(
- self, origin: Optional[str], content: JsonDict, query: Dict[bytes, List[bytes]]
- ) -> Tuple[int, JsonDict]:
- if "invites" in content:
- last_exception = None
- for invite in content["invites"]:
- try:
- if "signed" not in invite or "token" not in invite["signed"]:
- message = (
- "Rejecting received notification of third-"
- "party invite without signed: %s" % (invite,)
- )
- logger.info(message)
- raise SynapseError(400, message)
- await self.handler.exchange_third_party_invite(
- invite["sender"],
- invite["mxid"],
- invite["room_id"],
- invite["signed"],
- )
- except Exception as e:
- last_exception = e
- if last_exception:
- raise last_exception
- return 200, {}
-
-
-class OpenIdUserInfo(BaseFederationServerServlet):
- """
- Exchange a bearer token for information about a user.
-
- The response format should be compatible with:
- http://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse
-
- GET /openid/userinfo?access_token=ABDEFGH HTTP/1.1
-
- HTTP/1.1 200 OK
- Content-Type: application/json
-
- {
- "sub": "@userpart:example.org",
- }
- """
-
- PATH = "/openid/userinfo"
-
- REQUIRE_AUTH = False
-
- async def on_GET(
- self,
- origin: Optional[str],
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- ) -> Tuple[int, JsonDict]:
- token = parse_string_from_args(query, "access_token")
- if token is None:
- return (
- 401,
- {"errcode": "M_MISSING_TOKEN", "error": "Access Token required"},
- )
-
- user_id = await self.handler.on_openid_userinfo(token)
-
- if user_id is None:
- return (
- 401,
- {
- "errcode": "M_UNKNOWN_TOKEN",
- "error": "Access Token unknown or expired",
- },
- )
-
- return 200, {"sub": user_id}
-
-
-class PublicRoomList(BaseFederationServlet):
- """
- Fetch the public room list for this server.
-
- This API returns information in the same format as /publicRooms on the
- client API, but will only ever include local public rooms and hence is
- intended for consumption by other homeservers.
-
- GET /publicRooms HTTP/1.1
-
- HTTP/1.1 200 OK
- Content-Type: application/json
-
- {
- "chunk": [
- {
- "aliases": [
- "#test:localhost"
- ],
- "guest_can_join": false,
- "name": "test room",
- "num_joined_members": 3,
- "room_id": "!whkydVegtvatLfXmPN:localhost",
- "world_readable": false
- }
- ],
- "end": "END",
- "start": "START"
- }
- """
-
- PATH = "/publicRooms"
-
- def __init__(
- self,
- hs: HomeServer,
- authenticator: Authenticator,
- ratelimiter: FederationRateLimiter,
- server_name: str,
- allow_access: bool,
- ):
- super().__init__(hs, authenticator, ratelimiter, server_name)
- self.handler = hs.get_room_list_handler()
- self.allow_access = allow_access
-
- async def on_GET(
- self, origin: str, content: Literal[None], query: Dict[bytes, List[bytes]]
- ) -> Tuple[int, JsonDict]:
- if not self.allow_access:
- raise FederationDeniedError(origin)
-
- limit = parse_integer_from_args(query, "limit", 0)
- since_token = parse_string_from_args(query, "since", None)
- include_all_networks = parse_boolean_from_args(
- query, "include_all_networks", default=False
- )
- third_party_instance_id = parse_string_from_args(
- query, "third_party_instance_id", None
- )
-
- if include_all_networks:
- network_tuple = None
- elif third_party_instance_id:
- network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
- else:
- network_tuple = ThirdPartyInstanceID(None, None)
-
- if limit == 0:
- # zero is a special value which corresponds to no limit.
- limit = None
-
- data = await self.handler.get_local_public_room_list(
- limit, since_token, network_tuple=network_tuple, from_federation=True
- )
- return 200, data
-
- async def on_POST(
- self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
- ) -> Tuple[int, JsonDict]:
- # This implements MSC2197 (Search Filtering over Federation)
- if not self.allow_access:
- raise FederationDeniedError(origin)
-
- limit: Optional[int] = int(content.get("limit", 100))
- since_token = content.get("since", None)
- search_filter = content.get("filter", None)
-
- include_all_networks = content.get("include_all_networks", False)
- third_party_instance_id = content.get("third_party_instance_id", None)
-
- if include_all_networks:
- network_tuple = None
- if third_party_instance_id is not None:
- raise SynapseError(
- 400, "Can't use include_all_networks with an explicit network"
- )
- elif third_party_instance_id is None:
- network_tuple = ThirdPartyInstanceID(None, None)
- else:
- network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
-
- if search_filter is None:
- logger.warning("Nonefilter")
-
- if limit == 0:
- # zero is a special value which corresponds to no limit.
- limit = None
-
- data = await self.handler.get_local_public_room_list(
- limit=limit,
- since_token=since_token,
- search_filter=search_filter,
- network_tuple=network_tuple,
- from_federation=True,
- )
-
- return 200, data
-
-
-class FederationVersionServlet(BaseFederationServlet):
- PATH = "/version"
-
- REQUIRE_AUTH = False
-
- async def on_GET(
- self,
- origin: Optional[str],
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- ) -> Tuple[int, JsonDict]:
- return (
- 200,
- {"server": {"name": "Synapse", "version": get_version_string(synapse)}},
- )
-
-
-class BaseGroupsServerServlet(BaseFederationServlet):
- """Abstract base class for federation servlet classes which provides a groups server handler.
-
- See BaseFederationServlet for more information.
- """
-
- def __init__(
- self,
- hs: HomeServer,
- authenticator: Authenticator,
- ratelimiter: FederationRateLimiter,
- server_name: str,
- ):
- super().__init__(hs, authenticator, ratelimiter, server_name)
- self.handler = hs.get_groups_server_handler()
-
-
-class FederationGroupsProfileServlet(BaseGroupsServerServlet):
- """Get/set the basic profile of a group on behalf of a user"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/profile"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- new_content = await self.handler.get_group_profile(group_id, requester_user_id)
-
- return 200, new_content
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- new_content = await self.handler.update_group_profile(
- group_id, requester_user_id, content
- )
-
- return 200, new_content
-
-
-class FederationGroupsSummaryServlet(BaseGroupsServerServlet):
- PATH = "/groups/(?P<group_id>[^/]*)/summary"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- new_content = await self.handler.get_group_summary(group_id, requester_user_id)
-
- return 200, new_content
-
-
-class FederationGroupsRoomsServlet(BaseGroupsServerServlet):
- """Get the rooms in a group on behalf of a user"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/rooms"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- new_content = await self.handler.get_rooms_in_group(group_id, requester_user_id)
-
- return 200, new_content
-
-
-class FederationGroupsAddRoomsServlet(BaseGroupsServerServlet):
- """Add/remove room from group"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- room_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- new_content = await self.handler.add_room_to_group(
- group_id, requester_user_id, room_id, content
- )
-
- return 200, new_content
-
- async def on_DELETE(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- room_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- new_content = await self.handler.remove_room_from_group(
- group_id, requester_user_id, room_id
- )
-
- return 200, new_content
-
-
-class FederationGroupsAddRoomsConfigServlet(BaseGroupsServerServlet):
- """Update room config in group"""
-
- PATH = (
- "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
- "/config/(?P<config_key>[^/]*)"
- )
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- room_id: str,
- config_key: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- result = await self.handler.update_room_in_group(
- group_id, requester_user_id, room_id, config_key, content
- )
-
- return 200, result
-
-
-class FederationGroupsUsersServlet(BaseGroupsServerServlet):
- """Get the users in a group on behalf of a user"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/users"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- new_content = await self.handler.get_users_in_group(group_id, requester_user_id)
-
- return 200, new_content
-
-
-class FederationGroupsInvitedUsersServlet(BaseGroupsServerServlet):
- """Get the users that have been invited to a group"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/invited_users"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- new_content = await self.handler.get_invited_users_in_group(
- group_id, requester_user_id
- )
-
- return 200, new_content
-
-
-class FederationGroupsInviteServlet(BaseGroupsServerServlet):
- """Ask a group server to invite someone to the group"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- user_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- new_content = await self.handler.invite_to_group(
- group_id, user_id, requester_user_id, content
- )
-
- return 200, new_content
-
-
-class FederationGroupsAcceptInviteServlet(BaseGroupsServerServlet):
- """Accept an invitation from the group server"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- user_id: str,
- ) -> Tuple[int, JsonDict]:
- if get_domain_from_id(user_id) != origin:
- raise SynapseError(403, "user_id doesn't match origin")
-
- new_content = await self.handler.accept_invite(group_id, user_id, content)
-
- return 200, new_content
-
-
-class FederationGroupsJoinServlet(BaseGroupsServerServlet):
- """Attempt to join a group"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- user_id: str,
- ) -> Tuple[int, JsonDict]:
- if get_domain_from_id(user_id) != origin:
- raise SynapseError(403, "user_id doesn't match origin")
-
- new_content = await self.handler.join_group(group_id, user_id, content)
-
- return 200, new_content
-
-
-class FederationGroupsRemoveUserServlet(BaseGroupsServerServlet):
- """Leave or kick a user from the group"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- user_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- new_content = await self.handler.remove_user_from_group(
- group_id, user_id, requester_user_id, content
- )
-
- return 200, new_content
-
-
-class BaseGroupsLocalServlet(BaseFederationServlet):
- """Abstract base class for federation servlet classes which provides a groups local handler.
-
- See BaseFederationServlet for more information.
- """
-
- def __init__(
- self,
- hs: HomeServer,
- authenticator: Authenticator,
- ratelimiter: FederationRateLimiter,
- server_name: str,
- ):
- super().__init__(hs, authenticator, ratelimiter, server_name)
- self.handler = hs.get_groups_local_handler()
-
-
-class FederationGroupsLocalInviteServlet(BaseGroupsLocalServlet):
- """A group server has invited a local user"""
-
- PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- user_id: str,
- ) -> Tuple[int, JsonDict]:
- if get_domain_from_id(group_id) != origin:
- raise SynapseError(403, "group_id doesn't match origin")
-
- assert isinstance(
- self.handler, GroupsLocalHandler
- ), "Workers cannot handle group invites."
-
- new_content = await self.handler.on_invite(group_id, user_id, content)
-
- return 200, new_content
-
-
-class FederationGroupsRemoveLocalUserServlet(BaseGroupsLocalServlet):
- """A group server has removed a local user"""
-
- PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- user_id: str,
- ) -> Tuple[int, None]:
- if get_domain_from_id(group_id) != origin:
-            raise SynapseError(403, "group_id doesn't match origin")
-
- assert isinstance(
- self.handler, GroupsLocalHandler
- ), "Workers cannot handle group removals."
-
- await self.handler.user_removed_from_group(group_id, user_id, content)
-
- return 200, None
-
-
-class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
- """A group or user's server renews their attestation"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)"
-
- def __init__(
- self,
- hs: HomeServer,
- authenticator: Authenticator,
- ratelimiter: FederationRateLimiter,
- server_name: str,
- ):
- super().__init__(hs, authenticator, ratelimiter, server_name)
- self.handler = hs.get_groups_attestation_renewer()
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- user_id: str,
- ) -> Tuple[int, JsonDict]:
- # We don't need to check auth here as we check the attestation signatures
-
- new_content = await self.handler.on_renew_attestation(
- group_id, user_id, content
- )
-
- return 200, new_content
-
-
-class FederationGroupsSummaryRoomsServlet(BaseGroupsServerServlet):
- """Add/remove a room from the group summary, with optional category.
-
- Matches both:
- - /groups/:group/summary/rooms/:room_id
- - /groups/:group/summary/categories/:category/rooms/:room_id
- """
-
- PATH = (
- "/groups/(?P<group_id>[^/]*)/summary"
- "(/categories/(?P<category_id>[^/]+))?"
- "/rooms/(?P<room_id>[^/]*)"
- )
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- category_id: str,
- room_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- if category_id == "":
- raise SynapseError(
- 400, "category_id cannot be empty string", Codes.INVALID_PARAM
- )
-
- if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
- raise SynapseError(
- 400,
- "category_id may not be longer than %s characters"
- % (MAX_GROUP_CATEGORYID_LENGTH,),
- Codes.INVALID_PARAM,
- )
-
- resp = await self.handler.update_group_summary_room(
- group_id,
- requester_user_id,
- room_id=room_id,
- category_id=category_id,
- content=content,
- )
-
- return 200, resp
-
- async def on_DELETE(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- category_id: str,
- room_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- if category_id == "":
- raise SynapseError(400, "category_id cannot be empty string")
-
- resp = await self.handler.delete_group_summary_room(
- group_id, requester_user_id, room_id=room_id, category_id=category_id
- )
-
- return 200, resp
-
-
-class FederationGroupsCategoriesServlet(BaseGroupsServerServlet):
- """Get all categories for a group"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/categories/?"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- resp = await self.handler.get_group_categories(group_id, requester_user_id)
-
- return 200, resp
-
-
-class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
- """Add/remove/get a category in a group"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- category_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- resp = await self.handler.get_group_category(
- group_id, requester_user_id, category_id
- )
-
- return 200, resp
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- category_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- if category_id == "":
- raise SynapseError(400, "category_id cannot be empty string")
-
- if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
- raise SynapseError(
- 400,
- "category_id may not be longer than %s characters"
- % (MAX_GROUP_CATEGORYID_LENGTH,),
- Codes.INVALID_PARAM,
- )
-
- resp = await self.handler.upsert_group_category(
- group_id, requester_user_id, category_id, content
- )
-
- return 200, resp
-
- async def on_DELETE(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- category_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- if category_id == "":
- raise SynapseError(400, "category_id cannot be empty string")
-
- resp = await self.handler.delete_group_category(
- group_id, requester_user_id, category_id
- )
-
- return 200, resp
-
-
-class FederationGroupsRolesServlet(BaseGroupsServerServlet):
- """Get roles in a group"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/roles/?"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- resp = await self.handler.get_group_roles(group_id, requester_user_id)
-
- return 200, resp
-
-
-class FederationGroupsRoleServlet(BaseGroupsServerServlet):
- """Add/remove/get a role in a group"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)"
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- role_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- resp = await self.handler.get_group_role(group_id, requester_user_id, role_id)
-
- return 200, resp
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- role_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- if role_id == "":
- raise SynapseError(
- 400, "role_id cannot be empty string", Codes.INVALID_PARAM
- )
-
- if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
- raise SynapseError(
- 400,
- "role_id may not be longer than %s characters"
- % (MAX_GROUP_ROLEID_LENGTH,),
- Codes.INVALID_PARAM,
- )
-
- resp = await self.handler.update_group_role(
- group_id, requester_user_id, role_id, content
- )
-
- return 200, resp
-
- async def on_DELETE(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- role_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- if role_id == "":
- raise SynapseError(400, "role_id cannot be empty string")
-
- resp = await self.handler.delete_group_role(
- group_id, requester_user_id, role_id
- )
-
- return 200, resp
-
-
-class FederationGroupsSummaryUsersServlet(BaseGroupsServerServlet):
- """Add/remove a user from the group summary, with optional role.
-
- Matches both:
- - /groups/:group/summary/users/:user_id
- - /groups/:group/summary/roles/:role/users/:user_id
- """
-
- PATH = (
- "/groups/(?P<group_id>[^/]*)/summary"
- "(/roles/(?P<role_id>[^/]+))?"
- "/users/(?P<user_id>[^/]*)"
- )
-
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- role_id: str,
- user_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- if role_id == "":
- raise SynapseError(400, "role_id cannot be empty string")
-
- if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
- raise SynapseError(
- 400,
- "role_id may not be longer than %s characters"
- % (MAX_GROUP_ROLEID_LENGTH,),
- Codes.INVALID_PARAM,
- )
-
- resp = await self.handler.update_group_summary_user(
- group_id,
- requester_user_id,
- user_id=user_id,
- role_id=role_id,
- content=content,
- )
-
- return 200, resp
-
- async def on_DELETE(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- group_id: str,
- role_id: str,
- user_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- if role_id == "":
- raise SynapseError(400, "role_id cannot be empty string")
-
- resp = await self.handler.delete_group_summary_user(
- group_id, requester_user_id, user_id=user_id, role_id=role_id
- )
-
- return 200, resp
-
-
-class FederationGroupsBulkPublicisedServlet(BaseGroupsLocalServlet):
-    """Get the groups a list of users are publicising"""
-
- PATH = "/get_groups_publicised"
-
- async def on_POST(
- self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
- ) -> Tuple[int, JsonDict]:
- resp = await self.handler.bulk_get_publicised_groups(
- content["user_ids"], proxy=False
- )
-
- return 200, resp
-
-
-class FederationGroupsSettingJoinPolicyServlet(BaseGroupsServerServlet):
- """Sets whether a group is joinable without an invite or knock"""
-
- PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy"
-
- async def on_PUT(
- self,
- origin: str,
- content: JsonDict,
- query: Dict[bytes, List[bytes]],
- group_id: str,
- ) -> Tuple[int, JsonDict]:
- requester_user_id = parse_string_from_args(
- query, "requester_user_id", required=True
- )
- if get_domain_from_id(requester_user_id) != origin:
- raise SynapseError(403, "requester_user_id doesn't match origin")
-
- new_content = await self.handler.set_group_join_policy(
- group_id, requester_user_id, content
- )
-
- return 200, new_content
-
-
-class FederationSpaceSummaryServlet(BaseFederationServlet):
- PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
- PATH = "/spaces/(?P<room_id>[^/]*)"
-
- def __init__(
- self,
- hs: HomeServer,
- authenticator: Authenticator,
- ratelimiter: FederationRateLimiter,
- server_name: str,
- ):
- super().__init__(hs, authenticator, ratelimiter, server_name)
- self.handler = hs.get_space_summary_handler()
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Mapping[bytes, Sequence[bytes]],
- room_id: str,
- ) -> Tuple[int, JsonDict]:
- suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
- max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space")
-
- exclude_rooms = parse_strings_from_args(query, "exclude_rooms", default=[])
-
- return 200, await self.handler.federation_space_summary(
- origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
- )
-
- # TODO When switching to the stable endpoint, remove the POST handler.
- async def on_POST(
- self,
- origin: str,
- content: JsonDict,
- query: Mapping[bytes, Sequence[bytes]],
- room_id: str,
- ) -> Tuple[int, JsonDict]:
- suggested_only = content.get("suggested_only", False)
- if not isinstance(suggested_only, bool):
- raise SynapseError(
- 400, "'suggested_only' must be a boolean", Codes.BAD_JSON
- )
-
- exclude_rooms = content.get("exclude_rooms", [])
- if not isinstance(exclude_rooms, list) or any(
- not isinstance(x, str) for x in exclude_rooms
- ):
- raise SynapseError(400, "bad value for 'exclude_rooms'", Codes.BAD_JSON)
-
- max_rooms_per_space = content.get("max_rooms_per_space")
- if max_rooms_per_space is not None and not isinstance(max_rooms_per_space, int):
- raise SynapseError(
- 400, "bad value for 'max_rooms_per_space'", Codes.BAD_JSON
- )
-
- return 200, await self.handler.federation_space_summary(
- origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
- )
-
-
-class RoomComplexityServlet(BaseFederationServlet):
-    """
-    Indicates to other servers how complex (and therefore how likely to be
-    resource-intensive) a public room that this server knows about is.
-    """
-
- PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
- PREFIX = FEDERATION_UNSTABLE_PREFIX
-
- def __init__(
- self,
- hs: HomeServer,
- authenticator: Authenticator,
- ratelimiter: FederationRateLimiter,
- server_name: str,
- ):
- super().__init__(hs, authenticator, ratelimiter, server_name)
- self._store = self.hs.get_datastore()
-
- async def on_GET(
- self,
- origin: str,
- content: Literal[None],
- query: Dict[bytes, List[bytes]],
- room_id: str,
- ) -> Tuple[int, JsonDict]:
- is_public = await self._store.is_room_world_readable_or_publicly_joinable(
- room_id
- )
-
- if not is_public:
- raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)
-
- complexity = await self._store.get_room_complexity(room_id)
- return 200, complexity
-
-
-FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
- FederationSendServlet,
- FederationEventServlet,
- FederationStateV1Servlet,
- FederationStateIdsServlet,
- FederationBackfillServlet,
- FederationQueryServlet,
- FederationMakeJoinServlet,
- FederationMakeLeaveServlet,
- FederationEventServlet,
- FederationV1SendJoinServlet,
- FederationV2SendJoinServlet,
- FederationV1SendLeaveServlet,
- FederationV2SendLeaveServlet,
- FederationV1InviteServlet,
- FederationV2InviteServlet,
- FederationGetMissingEventsServlet,
- FederationEventAuthServlet,
- FederationClientKeysQueryServlet,
- FederationUserDevicesQueryServlet,
- FederationClientKeysClaimServlet,
- FederationThirdPartyInviteExchangeServlet,
- On3pidBindServlet,
- FederationVersionServlet,
- RoomComplexityServlet,
- FederationSpaceSummaryServlet,
- FederationV1SendKnockServlet,
- FederationMakeKnockServlet,
-)
-
-OPENID_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (OpenIdUserInfo,)
-
-ROOM_LIST_CLASSES: Tuple[Type[PublicRoomList], ...] = (PublicRoomList,)
-
-GROUP_SERVER_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
- FederationGroupsProfileServlet,
- FederationGroupsSummaryServlet,
- FederationGroupsRoomsServlet,
- FederationGroupsUsersServlet,
- FederationGroupsInvitedUsersServlet,
- FederationGroupsInviteServlet,
- FederationGroupsAcceptInviteServlet,
- FederationGroupsJoinServlet,
- FederationGroupsRemoveUserServlet,
- FederationGroupsSummaryRoomsServlet,
- FederationGroupsCategoriesServlet,
- FederationGroupsCategoryServlet,
- FederationGroupsRolesServlet,
- FederationGroupsRoleServlet,
- FederationGroupsSummaryUsersServlet,
- FederationGroupsAddRoomsServlet,
- FederationGroupsAddRoomsConfigServlet,
- FederationGroupsSettingJoinPolicyServlet,
-)
-
-
-GROUP_LOCAL_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
- FederationGroupsLocalInviteServlet,
- FederationGroupsRemoveLocalUserServlet,
- FederationGroupsBulkPublicisedServlet,
-)
-
-
-GROUP_ATTESTATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
- FederationGroupsRenewAttestaionServlet,
-)
-
-
-DEFAULT_SERVLET_GROUPS = (
- "federation",
- "room_list",
- "group_server",
- "group_local",
- "group_attestation",
- "openid",
-)
-
-
-def register_servlets(
- hs: HomeServer,
- resource: HttpServer,
- authenticator: Authenticator,
- ratelimiter: FederationRateLimiter,
- servlet_groups: Optional[Container[str]] = None,
-):
- """Initialize and register servlet classes.
-
- Will by default register all servlets. For custom behaviour, pass in
- a list of servlet_groups to register.
-
- Args:
- hs: homeserver
- resource: resource class to register to
- authenticator: authenticator to use
- ratelimiter: ratelimiter to use
- servlet_groups: List of servlet groups to register.
- Defaults to ``DEFAULT_SERVLET_GROUPS``.
- """
- if not servlet_groups:
- servlet_groups = DEFAULT_SERVLET_GROUPS
-
- if "federation" in servlet_groups:
- for servletclass in FEDERATION_SERVLET_CLASSES:
- servletclass(
- hs=hs,
- authenticator=authenticator,
- ratelimiter=ratelimiter,
- server_name=hs.hostname,
- ).register(resource)
-
- if "openid" in servlet_groups:
- for servletclass in OPENID_SERVLET_CLASSES:
- servletclass(
- hs=hs,
- authenticator=authenticator,
- ratelimiter=ratelimiter,
- server_name=hs.hostname,
- ).register(resource)
-
- if "room_list" in servlet_groups:
- for servletclass in ROOM_LIST_CLASSES:
- servletclass(
- hs=hs,
- authenticator=authenticator,
- ratelimiter=ratelimiter,
- server_name=hs.hostname,
- allow_access=hs.config.allow_public_rooms_over_federation,
- ).register(resource)
-
- if "group_server" in servlet_groups:
- for servletclass in GROUP_SERVER_SERVLET_CLASSES:
- servletclass(
- hs=hs,
- authenticator=authenticator,
- ratelimiter=ratelimiter,
- server_name=hs.hostname,
- ).register(resource)
-
- if "group_local" in servlet_groups:
- for servletclass in GROUP_LOCAL_SERVLET_CLASSES:
- servletclass(
- hs=hs,
- authenticator=authenticator,
- ratelimiter=ratelimiter,
- server_name=hs.hostname,
- ).register(resource)
-
- if "group_attestation" in servlet_groups:
- for servletclass in GROUP_ATTESTATION_SERVLET_CLASSES:
- servletclass(
- hs=hs,
- authenticator=authenticator,
- ratelimiter=ratelimiter,
- server_name=hs.hostname,
- ).register(resource)
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
new file mode 100644
index 0000000000..95176ba6f9
--- /dev/null
+++ b/synapse/federation/transport/server/__init__.py
@@ -0,0 +1,332 @@
+# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
+# Copyright 2020 Sorunome
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import Dict, Iterable, List, Optional, Tuple, Type
+
+from typing_extensions import Literal
+
+from synapse.api.errors import FederationDeniedError, SynapseError
+from synapse.federation.transport.server._base import (
+ Authenticator,
+ BaseFederationServlet,
+)
+from synapse.federation.transport.server.federation import FEDERATION_SERVLET_CLASSES
+from synapse.federation.transport.server.groups_local import GROUP_LOCAL_SERVLET_CLASSES
+from synapse.federation.transport.server.groups_server import (
+ GROUP_SERVER_SERVLET_CLASSES,
+)
+from synapse.http.server import HttpServer, JsonResource
+from synapse.http.servlet import (
+ parse_boolean_from_args,
+ parse_integer_from_args,
+ parse_string_from_args,
+)
+from synapse.server import HomeServer
+from synapse.types import JsonDict, ThirdPartyInstanceID
+from synapse.util.ratelimitutils import FederationRateLimiter
+
+logger = logging.getLogger(__name__)
+
+
+class TransportLayerServer(JsonResource):
+ """Handles incoming federation HTTP requests"""
+
+ def __init__(self, hs: HomeServer, servlet_groups: Optional[List[str]] = None):
+ """Initialize the TransportLayerServer
+
+ Will by default register all servlets. For custom behaviour, pass in
+ a list of servlet_groups to register.
+
+ Args:
+ hs: homeserver
+ servlet_groups: List of servlet groups to register.
+ Defaults to ``DEFAULT_SERVLET_GROUPS``.
+ """
+ self.hs = hs
+ self.clock = hs.get_clock()
+ self.servlet_groups = servlet_groups
+
+ super().__init__(hs, canonical_json=False)
+
+ self.authenticator = Authenticator(hs)
+ self.ratelimiter = hs.get_federation_ratelimiter()
+
+ self.register_servlets()
+
+ def register_servlets(self) -> None:
+ register_servlets(
+ self.hs,
+ resource=self,
+ ratelimiter=self.ratelimiter,
+ authenticator=self.authenticator,
+ servlet_groups=self.servlet_groups,
+ )
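+
+    # A usage sketch (illustrative only, not part of this change): a worker
+    # that should expose just the federation and openid endpoints could be
+    # constructed as
+    #
+    #     TransportLayerServer(hs, servlet_groups=["federation", "openid"])
+    #
+    # Leaving `servlet_groups` as None registers every group in
+    # DEFAULT_SERVLET_GROUPS.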
+
+
+class PublicRoomList(BaseFederationServlet):
+ """
+ Fetch the public room list for this server.
+
+ This API returns information in the same format as /publicRooms on the
+ client API, but will only ever include local public rooms and hence is
+ intended for consumption by other homeservers.
+
+ GET /publicRooms HTTP/1.1
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {
+ "chunk": [
+ {
+ "aliases": [
+ "#test:localhost"
+ ],
+ "guest_can_join": false,
+ "name": "test room",
+ "num_joined_members": 3,
+ "room_id": "!whkydVegtvatLfXmPN:localhost",
+ "world_readable": false
+ }
+ ],
+ "end": "END",
+ "start": "START"
+ }
+ """
+
+ PATH = "/publicRooms"
+
+ def __init__(
+ self,
+ hs: HomeServer,
+ authenticator: Authenticator,
+ ratelimiter: FederationRateLimiter,
+ server_name: str,
+ ):
+ super().__init__(hs, authenticator, ratelimiter, server_name)
+ self.handler = hs.get_room_list_handler()
+ self.allow_access = hs.config.allow_public_rooms_over_federation
+
+ async def on_GET(
+ self, origin: str, content: Literal[None], query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
+ if not self.allow_access:
+ raise FederationDeniedError(origin)
+
+ limit = parse_integer_from_args(query, "limit", 0)
+ since_token = parse_string_from_args(query, "since", None)
+ include_all_networks = parse_boolean_from_args(
+ query, "include_all_networks", default=False
+ )
+ third_party_instance_id = parse_string_from_args(
+ query, "third_party_instance_id", None
+ )
+
+ if include_all_networks:
+ network_tuple = None
+ elif third_party_instance_id:
+ network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
+ else:
+ network_tuple = ThirdPartyInstanceID(None, None)
+
+ if limit == 0:
+ # zero is a special value which corresponds to no limit.
+ limit = None
+
+ data = await self.handler.get_local_public_room_list(
+ limit, since_token, network_tuple=network_tuple, from_federation=True
+ )
+ return 200, data
+
+ async def on_POST(
+ self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
+ # This implements MSC2197 (Search Filtering over Federation)
+ if not self.allow_access:
+ raise FederationDeniedError(origin)
+
+ limit: Optional[int] = int(content.get("limit", 100))
+ since_token = content.get("since", None)
+ search_filter = content.get("filter", None)
+
+ include_all_networks = content.get("include_all_networks", False)
+ third_party_instance_id = content.get("third_party_instance_id", None)
+
+ if include_all_networks:
+ network_tuple = None
+ if third_party_instance_id is not None:
+ raise SynapseError(
+ 400, "Can't use include_all_networks with an explicit network"
+ )
+ elif third_party_instance_id is None:
+ network_tuple = ThirdPartyInstanceID(None, None)
+ else:
+ network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
+
+ if search_filter is None:
+ logger.warning("Nonefilter")
+
+ if limit == 0:
+ # zero is a special value which corresponds to no limit.
+ limit = None
+
+ data = await self.handler.get_local_public_room_list(
+ limit=limit,
+ since_token=since_token,
+ search_filter=search_filter,
+ network_tuple=network_tuple,
+ from_federation=True,
+ )
+
+ return 200, data
+
+
+class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
+ """A group or user's server renews their attestation"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)"
+
+ def __init__(
+ self,
+ hs: HomeServer,
+ authenticator: Authenticator,
+ ratelimiter: FederationRateLimiter,
+ server_name: str,
+ ):
+ super().__init__(hs, authenticator, ratelimiter, server_name)
+ self.handler = hs.get_groups_attestation_renewer()
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ # We don't need to check auth here as we check the attestation signatures
+
+ new_content = await self.handler.on_renew_attestation(
+ group_id, user_id, content
+ )
+
+ return 200, new_content
+
+
+class OpenIdUserInfo(BaseFederationServlet):
+ """
+ Exchange a bearer token for information about a user.
+
+ The response format should be compatible with:
+ http://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse
+
+ GET /openid/userinfo?access_token=ABDEFGH HTTP/1.1
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {
+ "sub": "@userpart:example.org",
+ }
+ """
+
+ PATH = "/openid/userinfo"
+
+ REQUIRE_AUTH = False
+
+ def __init__(
+ self,
+ hs: HomeServer,
+ authenticator: Authenticator,
+ ratelimiter: FederationRateLimiter,
+ server_name: str,
+ ):
+ super().__init__(hs, authenticator, ratelimiter, server_name)
+ self.handler = hs.get_federation_server()
+
+ async def on_GET(
+ self,
+ origin: Optional[str],
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ ) -> Tuple[int, JsonDict]:
+ token = parse_string_from_args(query, "access_token")
+ if token is None:
+ return (
+ 401,
+ {"errcode": "M_MISSING_TOKEN", "error": "Access Token required"},
+ )
+
+ user_id = await self.handler.on_openid_userinfo(token)
+
+ if user_id is None:
+ return (
+ 401,
+ {
+ "errcode": "M_UNKNOWN_TOKEN",
+ "error": "Access Token unknown or expired",
+ },
+ )
+
+ return 200, {"sub": user_id}
+
+
+DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
+ "federation": FEDERATION_SERVLET_CLASSES,
+ "room_list": (PublicRoomList,),
+ "group_server": GROUP_SERVER_SERVLET_CLASSES,
+ "group_local": GROUP_LOCAL_SERVLET_CLASSES,
+ "group_attestation": (FederationGroupsRenewAttestaionServlet,),
+ "openid": (OpenIdUserInfo,),
+}
+
+
+def register_servlets(
+ hs: HomeServer,
+ resource: HttpServer,
+ authenticator: Authenticator,
+ ratelimiter: FederationRateLimiter,
+ servlet_groups: Optional[Iterable[str]] = None,
+):
+ """Initialize and register servlet classes.
+
+ Will by default register all servlets. For custom behaviour, pass in
+ a list of servlet_groups to register.
+
+ Args:
+ hs: homeserver
+ resource: resource class to register to
+ authenticator: authenticator to use
+ ratelimiter: ratelimiter to use
+ servlet_groups: List of servlet groups to register.
+ Defaults to ``DEFAULT_SERVLET_GROUPS``.
+ """
+ if not servlet_groups:
+ servlet_groups = DEFAULT_SERVLET_GROUPS.keys()
+
+ for servlet_group in servlet_groups:
+ # Skip unknown servlet groups.
+ if servlet_group not in DEFAULT_SERVLET_GROUPS:
+ raise RuntimeError(
+ f"Attempting to register unknown federation servlet: '{servlet_group}'"
+ )
+
+ for servletclass in DEFAULT_SERVLET_GROUPS[servlet_group]:
+ servletclass(
+ hs=hs,
+ authenticator=authenticator,
+ ratelimiter=ratelimiter,
+ server_name=hs.hostname,
+ ).register(resource)
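+
+
+# A short sketch of the table-driven registration above (illustrative only):
+# known group names select a tuple of servlet classes from
+# DEFAULT_SERVLET_GROUPS, while unknown names fail fast.
+#
+#     register_servlets(
+#         hs, resource, authenticator, ratelimiter,
+#         servlet_groups=["openid"],  # registers only OpenIdUserInfo
+#     )
+#     register_servlets(
+#         hs, resource, authenticator, ratelimiter,
+#         servlet_groups=["bogus"],  # raises RuntimeError
+#     )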
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
new file mode 100644
index 0000000000..624c859f1e
--- /dev/null
+++ b/synapse/federation/transport/server/_base.py
@@ -0,0 +1,328 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import logging
+import re
+
+from synapse.api.errors import Codes, FederationDeniedError, SynapseError
+from synapse.api.urls import FEDERATION_V1_PREFIX
+from synapse.http.servlet import parse_json_object_from_request
+from synapse.logging import opentracing
+from synapse.logging.context import run_in_background
+from synapse.logging.opentracing import (
+ SynapseTags,
+ start_active_span,
+ start_active_span_from_request,
+ tags,
+ whitelisted_homeserver,
+)
+from synapse.server import HomeServer
+from synapse.util.ratelimitutils import FederationRateLimiter
+from synapse.util.stringutils import parse_and_validate_server_name
+
+logger = logging.getLogger(__name__)
+
+
+class AuthenticationError(SynapseError):
+ """There was a problem authenticating the request"""
+
+
+class NoAuthenticationError(AuthenticationError):
+ """The request had no authentication information"""
+
+
+class Authenticator:
+ def __init__(self, hs: HomeServer):
+ self._clock = hs.get_clock()
+ self.keyring = hs.get_keyring()
+ self.server_name = hs.hostname
+ self.store = hs.get_datastore()
+ self.federation_domain_whitelist = hs.config.federation_domain_whitelist
+ self.notifier = hs.get_notifier()
+
+ self.replication_client = None
+ if hs.config.worker.worker_app:
+ self.replication_client = hs.get_tcp_replication()
+
+ # A method just so we can pass 'self' as the authenticator to the Servlets
+ async def authenticate_request(self, request, content):
+ now = self._clock.time_msec()
+ json_request = {
+ "method": request.method.decode("ascii"),
+ "uri": request.uri.decode("ascii"),
+ "destination": self.server_name,
+ "signatures": {},
+ }
+
+ if content is not None:
+ json_request["content"] = content
+
+ origin = None
+
+ auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
+
+ if not auth_headers:
+ raise NoAuthenticationError(
+ 401, "Missing Authorization headers", Codes.UNAUTHORIZED
+ )
+
+ for auth in auth_headers:
+ if auth.startswith(b"X-Matrix"):
+ (origin, key, sig) = _parse_auth_header(auth)
+ json_request["origin"] = origin
+ json_request["signatures"].setdefault(origin, {})[key] = sig
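+
+        # Once an X-Matrix header has been parsed, json_request looks roughly
+        # like the following (values illustrative only):
+        #
+        #     {
+        #         "method": "PUT",
+        #         "uri": "/_matrix/federation/v1/send/1234",
+        #         "destination": "us.example.com",
+        #         "origin": "them.example.org",
+        #         "signatures": {"them.example.org": {"ed25519:key1": "<sig>"}},
+        #         "content": {...},
+        #     }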
+
+ if (
+ self.federation_domain_whitelist is not None
+ and origin not in self.federation_domain_whitelist
+ ):
+ raise FederationDeniedError(origin)
+
+ if origin is None or not json_request["signatures"]:
+ raise NoAuthenticationError(
+ 401, "Missing Authorization headers", Codes.UNAUTHORIZED
+ )
+
+ await self.keyring.verify_json_for_server(
+ origin,
+ json_request,
+ now,
+ )
+
+ logger.debug("Request from %s", origin)
+ request.requester = origin
+
+        # If we get a valid signed request from the other side, it's probably
+        # alive.
+ retry_timings = await self.store.get_destination_retry_timings(origin)
+ if retry_timings and retry_timings.retry_last_ts:
+ run_in_background(self._reset_retry_timings, origin)
+
+ return origin
+
+ async def _reset_retry_timings(self, origin):
+ try:
+ logger.info("Marking origin %r as up", origin)
+ await self.store.set_destination_retry_timings(origin, None, 0, 0)
+
+ # Inform the relevant places that the remote server is back up.
+ self.notifier.notify_remote_server_up(origin)
+ if self.replication_client:
+ # If we're on a worker we try and inform master about this. The
+ # replication client doesn't hook into the notifier to avoid
+ # infinite loops where we send a `REMOTE_SERVER_UP` command to
+ # master, which then echoes it back to us which in turn pokes
+ # the notifier.
+ self.replication_client.send_remote_server_up(origin)
+
+ except Exception:
+ logger.exception("Error resetting retry timings on %s", origin)
+
+
+def _parse_auth_header(header_bytes):
+ """Parse an X-Matrix auth header
+
+ Args:
+ header_bytes (bytes): header value
+
+ Returns:
+ Tuple[str, str, str]: origin, key id, signature.
+
+ Raises:
+ AuthenticationError if the header could not be parsed
+ """
+ try:
+ header_str = header_bytes.decode("utf-8")
+ params = header_str.split(" ")[1].split(",")
+ param_dict = dict(kv.split("=") for kv in params)
+
+ def strip_quotes(value):
+ if value.startswith('"'):
+ return value[1:-1]
+ else:
+ return value
+
+ origin = strip_quotes(param_dict["origin"])
+
+ # ensure that the origin is a valid server name
+ parse_and_validate_server_name(origin)
+
+ key = strip_quotes(param_dict["key"])
+ sig = strip_quotes(param_dict["sig"])
+ return origin, key, sig
+ except Exception as e:
+ logger.warning(
+ "Error parsing auth header '%s': %s",
+ header_bytes.decode("ascii", "replace"),
+ e,
+ )
+ raise AuthenticationError(
+ 400, "Malformed Authorization header", Codes.UNAUTHORIZED
+ )
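+
+
+# For reference, a typical X-Matrix Authorization header (values illustrative
+# only) looks like
+#
+#     X-Matrix origin=them.example.org,key="ed25519:key1",sig="<base64 sig>"
+#
+# which _parse_auth_header returns as
+# ("them.example.org", "ed25519:key1", "<base64 sig>").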
+
+
+class BaseFederationServlet:
+ """Abstract base class for federation servlet classes.
+
+ The servlet object should have a PATH attribute which takes the form of a regexp to
+ match against the request path (excluding the /federation/v1 prefix).
+
+ The servlet should also implement one or more of on_GET, on_POST, on_PUT, to match
+ the appropriate HTTP method. These methods must be *asynchronous* and have the
+ signature:
+
+ on_<METHOD>(self, origin, content, query, **kwargs)
+
+ With arguments:
+
+ origin (unicode|None): The authenticated server_name of the calling server,
+ unless REQUIRE_AUTH is set to False and authentication failed.
+
+ content (unicode|None): decoded json body of the request. None if the
+ request was a GET.
+
+ query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
+ (ie, '+' and '%xx' are decoded) but note that it is *not* utf8-decoded
+ yet.
+
+ **kwargs (dict[unicode, unicode]): the dict mapping keys to path
+ components as specified in the path match regexp.
+
+ Returns:
+ Optional[Tuple[int, object]]: either (response code, response object) to
+ return a JSON response, or None if the request has already been handled.
+
+ Raises:
+ SynapseError: to return an error code
+
+ Exception: other exceptions will be caught, logged, and a 500 will be
+ returned.
+ """
+
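+    # A hypothetical minimal subclass (illustrative only; this endpoint does
+    # not exist):
+    #
+    #     class PingServlet(BaseFederationServlet):
+    #         PATH = "/ping/(?P<room_id>[^/]*)"
+    #
+    #         async def on_GET(self, origin, content, query, room_id):
+    #             return 200, {"room_id": room_id}
+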
+ PATH = "" # Overridden in subclasses, the regex to match against the path.
+
+ REQUIRE_AUTH = True
+
+ PREFIX = FEDERATION_V1_PREFIX # Allows specifying the API version
+
+ RATELIMIT = True # Whether to rate limit requests or not
+
+ def __init__(
+ self,
+ hs: HomeServer,
+ authenticator: Authenticator,
+ ratelimiter: FederationRateLimiter,
+ server_name: str,
+ ):
+ self.hs = hs
+ self.authenticator = authenticator
+ self.ratelimiter = ratelimiter
+ self.server_name = server_name
+
+ def _wrap(self, func):
+ authenticator = self.authenticator
+ ratelimiter = self.ratelimiter
+
+ @functools.wraps(func)
+ async def new_func(request, *args, **kwargs):
+ """A callback which can be passed to HttpServer.RegisterPaths
+
+ Args:
+ request (twisted.web.http.Request):
+ *args: unused?
+ **kwargs (dict[unicode, unicode]): the dict mapping keys to path
+ components as specified in the path match regexp.
+
+ Returns:
+ Tuple[int, object]|None: (response code, response object) as returned by
+ the callback method. None if the request has already been handled.
+ """
+ content = None
+ if request.method in [b"PUT", b"POST"]:
+ # TODO: Handle other method types? other content types?
+ content = parse_json_object_from_request(request)
+
+ try:
+ origin = await authenticator.authenticate_request(request, content)
+ except NoAuthenticationError:
+ origin = None
+ if self.REQUIRE_AUTH:
+ logger.warning(
+ "authenticate_request failed: missing authentication"
+ )
+ raise
+ except Exception as e:
+ logger.warning("authenticate_request failed: %s", e)
+ raise
+
+ request_tags = {
+ SynapseTags.REQUEST_ID: request.get_request_id(),
+ tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
+ tags.HTTP_METHOD: request.get_method(),
+ tags.HTTP_URL: request.get_redacted_uri(),
+ tags.PEER_HOST_IPV6: request.getClientIP(),
+ "authenticated_entity": origin,
+ "servlet_name": request.request_metrics.name,
+ }
+
+ # Only accept the span context if the origin is authenticated
+ # and whitelisted
+ if origin and whitelisted_homeserver(origin):
+ scope = start_active_span_from_request(
+ request, "incoming-federation-request", tags=request_tags
+ )
+ else:
+ scope = start_active_span(
+ "incoming-federation-request", tags=request_tags
+ )
+
+ with scope:
+ opentracing.inject_response_headers(request.responseHeaders)
+
+ if origin and self.RATELIMIT:
+ with ratelimiter.ratelimit(origin) as d:
+ await d
+ if request._disconnected:
+ logger.warning(
+ "client disconnected before we started processing "
+ "request"
+ )
+ return -1, None
+ response = await func(
+ origin, content, request.args, *args, **kwargs
+ )
+ else:
+ response = await func(
+ origin, content, request.args, *args, **kwargs
+ )
+
+ return response
+
+ return new_func
+
+ def register(self, server):
+ pattern = re.compile("^" + self.PREFIX + self.PATH + "$")
+
+ for method in ("GET", "PUT", "POST"):
+ code = getattr(self, "on_%s" % (method), None)
+ if code is None:
+ continue
+
+ server.register_paths(
+ method,
+ (pattern,),
+ self._wrap(code),
+ self.__class__.__name__,
+ )
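+
+
+# For example (illustrative): a servlet defining
+# PATH = "/query/(?P<query_type>[^/]*)" with an on_GET method gets its
+# handler registered for GET requests against the compiled pattern
+# "^" + PREFIX + PATH + "$", i.e. roughly
+# ^/_matrix/federation/v1/query/(?P<query_type>[^/]*)$ under the default
+# FEDERATION_V1_PREFIX.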
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
new file mode 100644
index 0000000000..2fdf6cc99e
--- /dev/null
+++ b/synapse/federation/transport/server/federation.py
@@ -0,0 +1,706 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union
+
+from typing_extensions import Literal
+
+import synapse
+from synapse.api.errors import Codes, SynapseError
+from synapse.api.room_versions import RoomVersions
+from synapse.api.urls import FEDERATION_UNSTABLE_PREFIX, FEDERATION_V2_PREFIX
+from synapse.federation.transport.server._base import (
+ Authenticator,
+ BaseFederationServlet,
+)
+from synapse.http.servlet import (
+ parse_boolean_from_args,
+ parse_integer_from_args,
+ parse_string_from_args,
+ parse_strings_from_args,
+)
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util.ratelimitutils import FederationRateLimiter
+from synapse.util.versionstring import get_version_string
+
+logger = logging.getLogger(__name__)
+
+
+class BaseFederationServerServlet(BaseFederationServlet):
+ """Abstract base class for federation servlet classes which provides a federation server handler.
+
+ See BaseFederationServlet for more information.
+ """
+
+ def __init__(
+ self,
+ hs: HomeServer,
+ authenticator: Authenticator,
+ ratelimiter: FederationRateLimiter,
+ server_name: str,
+ ):
+ super().__init__(hs, authenticator, ratelimiter, server_name)
+ self.handler = hs.get_federation_server()
+
+
+class FederationSendServlet(BaseFederationServerServlet):
+ PATH = "/send/(?P<transaction_id>[^/]*)/?"
+
+ # We ratelimit manually in the handler as we queue up the requests and we
+ # don't want to fill up the ratelimiter with blocked requests.
+ RATELIMIT = False
+
+ # Handles an incoming transaction: a batch of PDUs and EDUs pushed to
+ # us by the origin server.
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ transaction_id: str,
+ ) -> Tuple[int, JsonDict]:
+ """Called on PUT /send/<transaction_id>/
+
+ Args:
+ transaction_id: The transaction_id associated with this request. This
+ is *not* None.
+
+ Returns:
+ Tuple of `(code, response)`, where
+ `response` is a python dict to be converted into JSON that is
+ used as the response body.
+ """
+ # Parse the request
+ try:
+ transaction_data = content
+
+ logger.debug("Decoded %s: %s", transaction_id, str(transaction_data))
+
+ logger.info(
+ "Received txn %s from %s. (PDUs: %d, EDUs: %d)",
+ transaction_id,
+ origin,
+ len(transaction_data.get("pdus", [])),
+ len(transaction_data.get("edus", [])),
+ )
+
+ except Exception as e:
+ logger.exception(e)
+ return 400, {"error": "Invalid transaction"}
+
+ code, response = await self.handler.on_incoming_transaction(
+ origin, transaction_id, self.server_name, transaction_data
+ )
+
+ return code, response
+
+
+class FederationEventServlet(BaseFederationServerServlet):
+ PATH = "/event/(?P<event_id>[^/]*)/?"
+
+ # Retrieves a single event (PDU) by its event ID.
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ event_id: str,
+ ) -> Tuple[int, Union[JsonDict, str]]:
+ return await self.handler.on_pdu_request(origin, event_id)
+
+
+class FederationStateV1Servlet(BaseFederationServerServlet):
+ PATH = "/state/(?P<room_id>[^/]*)/?"
+
+ # Returns the state of a room, as of the given event if one is supplied.
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
+ return await self.handler.on_room_state_request(
+ origin,
+ room_id,
+ parse_string_from_args(query, "event_id", None, required=False),
+ )
+
+
+class FederationStateIdsServlet(BaseFederationServerServlet):
+ PATH = "/state_ids/(?P<room_id>[^/]*)/?"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
+ return await self.handler.on_state_ids_request(
+ origin,
+ room_id,
+ parse_string_from_args(query, "event_id", None, required=True),
+ )
+
+
+class FederationBackfillServlet(BaseFederationServerServlet):
+ PATH = "/backfill/(?P<room_id>[^/]*)/?"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
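+ # `v` (the event ID(s) to backfill from) is required; if it is absent,
+ # the lookup below raises a KeyError.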
+ versions = [x.decode("ascii") for x in query[b"v"]]
+ limit = parse_integer_from_args(query, "limit", None)
+
+ if not limit:
+ return 400, {"error": "Did not include limit param"}
+
+ return await self.handler.on_backfill_request(origin, room_id, versions, limit)
+
+
+class FederationQueryServlet(BaseFederationServerServlet):
+ PATH = "/query/(?P<query_type>[^/]*)"
+
+ # Handles a server-to-server query (e.g. a profile or directory lookup).
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ query_type: str,
+ ) -> Tuple[int, JsonDict]:
+ args = {k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()}
+ args["origin"] = origin
+ return await self.handler.on_query_request(query_type, args)
+
+
+class FederationMakeJoinServlet(BaseFederationServerServlet):
+ PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ """
+ Args:
+ origin: The authenticated server_name of the calling server
+
+ content: (GETs don't have bodies)
+
+ query: Query params from the request.
+
+ room_id: The room ID, taken from the path.
+
+ user_id: The user ID of the joining user, taken from the path.
+
+ Returns:
+ Tuple of (response code, response object)
+ """
+ supported_versions = parse_strings_from_args(query, "ver", encoding="utf-8")
+ if supported_versions is None:
+ supported_versions = ["1"]
+
+ result = await self.handler.on_make_join_request(
+ origin, room_id, user_id, supported_versions=supported_versions
+ )
+ return 200, result
+
+
+class FederationMakeLeaveServlet(BaseFederationServerServlet):
+ PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ result = await self.handler.on_make_leave_request(origin, room_id, user_id)
+ return 200, result
+
+
+class FederationV1SendLeaveServlet(BaseFederationServerServlet):
+ PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, Tuple[int, JsonDict]]:
+ result = await self.handler.on_send_leave_request(origin, content, room_id)
+ return 200, (200, result)
+
+
+class FederationV2SendLeaveServlet(BaseFederationServerServlet):
+ PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+
+ PREFIX = FEDERATION_V2_PREFIX
+
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, JsonDict]:
+ result = await self.handler.on_send_leave_request(origin, content, room_id)
+ return 200, result
+
+
+class FederationMakeKnockServlet(BaseFederationServerServlet):
+ PATH = "/make_knock/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ # Retrieve the room versions the remote homeserver claims to support
+ supported_versions = parse_strings_from_args(
+ query, "ver", required=True, encoding="utf-8"
+ )
+
+ result = await self.handler.on_make_knock_request(
+ origin, room_id, user_id, supported_versions=supported_versions
+ )
+ return 200, result
+
+
+class FederationV1SendKnockServlet(BaseFederationServerServlet):
+ PATH = "/send_knock/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, JsonDict]:
+ result = await self.handler.on_send_knock_request(origin, content, room_id)
+ return 200, result
+
+
+class FederationEventAuthServlet(BaseFederationServerServlet):
+ PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, JsonDict]:
+ return await self.handler.on_event_auth(origin, room_id, event_id)
+
+
+class FederationV1SendJoinServlet(BaseFederationServerServlet):
+ PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, Tuple[int, JsonDict]]:
+ # TODO(paul): assert that the event_id parsed from the path actually
+ # matches the one given in content
+ result = await self.handler.on_send_join_request(origin, content, room_id)
+ return 200, (200, result)
+
+
+class FederationV2SendJoinServlet(BaseFederationServerServlet):
+ PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+
+ PREFIX = FEDERATION_V2_PREFIX
+
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, JsonDict]:
+ # TODO(paul): assert that the event_id parsed from the path actually
+ # matches the one given in content
+ result = await self.handler.on_send_join_request(origin, content, room_id)
+ return 200, result
+
+
+class FederationV1InviteServlet(BaseFederationServerServlet):
+ PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, Tuple[int, JsonDict]]:
+ # We don't get a room version, so we have to assume it's either v1 or
+ # v2. This is fine, as the only difference between the two is the state
+ # resolution algorithm, which we don't use when processing invites.
+ result = await self.handler.on_invite_request(
+ origin, content, room_version_id=RoomVersions.V1.identifier
+ )
+
+ # V1 federation API is defined to return a content of `[200, {...}]`
+ # due to a historical bug.
+ return 200, (200, result)
+
+
+class FederationV2InviteServlet(BaseFederationServerServlet):
+ PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+
+ PREFIX = FEDERATION_V2_PREFIX
+
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, JsonDict]:
+ # TODO(paul): assert that room_id/event_id parsed from path actually
+ # match those given in content
+
+ room_version = content["room_version"]
+ event = content["event"]
+ invite_room_state = content["invite_room_state"]
+
+ # Synapse expects invite_room_state to be in unsigned, as it is in v1
+ # API
+
+ event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
+
+ result = await self.handler.on_invite_request(
+ origin, event, room_version_id=room_version
+ )
+ return 200, result
+
+
+class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
+ PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
+
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
+ await self.handler.on_exchange_third_party_invite_request(content)
+ return 200, {}
+
+
+class FederationClientKeysQueryServlet(BaseFederationServerServlet):
+ PATH = "/user/keys/query"
+
+ async def on_POST(
+ self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
+ return await self.handler.on_query_client_keys(origin, content)
+
+
+class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
+ PATH = "/user/devices/(?P<user_id>[^/]*)"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ return await self.handler.on_query_user_devices(origin, user_id)
+
+
+class FederationClientKeysClaimServlet(BaseFederationServerServlet):
+ PATH = "/user/keys/claim"
+
+ async def on_POST(
+ self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
+ response = await self.handler.on_claim_client_keys(origin, content)
+ return 200, response
+
+
+class FederationGetMissingEventsServlet(BaseFederationServerServlet):
+ # TODO(paul): why does this path have an optional trailing slash?
+ PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
+ limit = int(content.get("limit", 10))
+ earliest_events = content.get("earliest_events", [])
+ latest_events = content.get("latest_events", [])
+
+ result = await self.handler.on_get_missing_events(
+ origin,
+ room_id=room_id,
+ earliest_events=earliest_events,
+ latest_events=latest_events,
+ limit=limit,
+ )
+
+ return 200, result
+
+
+class On3pidBindServlet(BaseFederationServerServlet):
+ PATH = "/3pid/onbind"
+
+ REQUIRE_AUTH = False
+
+ async def on_POST(
+ self, origin: Optional[str], content: JsonDict, query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
+ if "invites" in content:
+ last_exception = None
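+ # Process each invite independently, remembering the most recent
+ # failure, so that one bad invite doesn't stop the rest from being
+ # handled.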
+ for invite in content["invites"]:
+ try:
+ if "signed" not in invite or "token" not in invite["signed"]:
+ message = (
+ "Rejecting received notification of third-"
+ "party invite without signed: %s" % (invite,)
+ )
+ logger.info(message)
+ raise SynapseError(400, message)
+ await self.handler.exchange_third_party_invite(
+ invite["sender"],
+ invite["mxid"],
+ invite["room_id"],
+ invite["signed"],
+ )
+ except Exception as e:
+ last_exception = e
+ if last_exception:
+ raise last_exception
+ return 200, {}
+
+
+class FederationVersionServlet(BaseFederationServlet):
+ PATH = "/version"
+
+ REQUIRE_AUTH = False
+
+ async def on_GET(
+ self,
+ origin: Optional[str],
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ ) -> Tuple[int, JsonDict]:
+ return (
+ 200,
+ {"server": {"name": "Synapse", "version": get_version_string(synapse)}},
+ )
+
+
+class FederationSpaceSummaryServlet(BaseFederationServlet):
+ PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
+ PATH = "/spaces/(?P<room_id>[^/]*)"
+
+ def __init__(
+ self,
+ hs: HomeServer,
+ authenticator: Authenticator,
+ ratelimiter: FederationRateLimiter,
+ server_name: str,
+ ):
+ super().__init__(hs, authenticator, ratelimiter, server_name)
+ self.handler = hs.get_room_summary_handler()
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Mapping[bytes, Sequence[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
+ suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
+
+ max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space")
+ if max_rooms_per_space is not None and max_rooms_per_space < 0:
+ raise SynapseError(
+ 400,
+ "Value for 'max_rooms_per_space' must be a non-negative integer",
+ Codes.BAD_JSON,
+ )
+
+ exclude_rooms = parse_strings_from_args(query, "exclude_rooms", default=[])
+
+ return 200, await self.handler.federation_space_summary(
+ origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
+ )
+
+ # TODO When switching to the stable endpoint, remove the POST handler.
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Mapping[bytes, Sequence[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
+ suggested_only = content.get("suggested_only", False)
+ if not isinstance(suggested_only, bool):
+ raise SynapseError(
+ 400, "'suggested_only' must be a boolean", Codes.BAD_JSON
+ )
+
+ exclude_rooms = content.get("exclude_rooms", [])
+ if not isinstance(exclude_rooms, list) or any(
+ not isinstance(x, str) for x in exclude_rooms
+ ):
+ raise SynapseError(400, "bad value for 'exclude_rooms'", Codes.BAD_JSON)
+
+ max_rooms_per_space = content.get("max_rooms_per_space")
+ if max_rooms_per_space is not None:
+ if not isinstance(max_rooms_per_space, int):
+ raise SynapseError(
+ 400, "bad value for 'max_rooms_per_space'", Codes.BAD_JSON
+ )
+ if max_rooms_per_space < 0:
+ raise SynapseError(
+ 400,
+ "Value for 'max_rooms_per_space' must be a non-negative integer",
+ Codes.BAD_JSON,
+ )
+
+ return 200, await self.handler.federation_space_summary(
+ origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
+ )
+
+
+class FederationRoomHierarchyServlet(BaseFederationServlet):
+ PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
+ PATH = "/hierarchy/(?P<room_id>[^/]*)"
+
+ def __init__(
+ self,
+ hs: HomeServer,
+ authenticator: Authenticator,
+ ratelimiter: FederationRateLimiter,
+ server_name: str,
+ ):
+ super().__init__(hs, authenticator, ratelimiter, server_name)
+ self.handler = hs.get_room_summary_handler()
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Mapping[bytes, Sequence[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
+ suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
+ return 200, await self.handler.get_federation_hierarchy(
+ origin, room_id, suggested_only
+ )
+
+
+class RoomComplexityServlet(BaseFederationServlet):
+ """
+ Reports to other servers how complex (and therefore how
+ resource-intensive to participate in) a public room known to this
+ server is.
+ """
+
+ PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
+ PREFIX = FEDERATION_UNSTABLE_PREFIX
+
+ def __init__(
+ self,
+ hs: HomeServer,
+ authenticator: Authenticator,
+ ratelimiter: FederationRateLimiter,
+ server_name: str,
+ ):
+ super().__init__(hs, authenticator, ratelimiter, server_name)
+ self._store = self.hs.get_datastore()
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
+ is_public = await self._store.is_room_world_readable_or_publicly_joinable(
+ room_id
+ )
+
+ if not is_public:
+ raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)
+
+ complexity = await self._store.get_room_complexity(room_id)
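+ # `complexity` is a map of complexity metrics; its exact shape is
+ # assumed here to look like {"v1": <float>}.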
+ return 200, complexity
+
+
+FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
+ FederationSendServlet,
+ FederationEventServlet,
+ FederationStateV1Servlet,
+ FederationStateIdsServlet,
+ FederationBackfillServlet,
+ FederationQueryServlet,
+ FederationMakeJoinServlet,
+ FederationMakeLeaveServlet,
+ FederationV1SendJoinServlet,
+ FederationV2SendJoinServlet,
+ FederationV1SendLeaveServlet,
+ FederationV2SendLeaveServlet,
+ FederationV1InviteServlet,
+ FederationV2InviteServlet,
+ FederationGetMissingEventsServlet,
+ FederationEventAuthServlet,
+ FederationClientKeysQueryServlet,
+ FederationUserDevicesQueryServlet,
+ FederationClientKeysClaimServlet,
+ FederationThirdPartyInviteExchangeServlet,
+ On3pidBindServlet,
+ FederationVersionServlet,
+ RoomComplexityServlet,
+ FederationSpaceSummaryServlet,
+ FederationRoomHierarchyServlet,
+ FederationV1SendKnockServlet,
+ FederationMakeKnockServlet,
+)
diff --git a/synapse/federation/transport/server/groups_local.py b/synapse/federation/transport/server/groups_local.py
new file mode 100644
index 0000000000..a12cd18d58
--- /dev/null
+++ b/synapse/federation/transport/server/groups_local.py
@@ -0,0 +1,113 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Dict, List, Tuple, Type
+
+from synapse.api.errors import SynapseError
+from synapse.federation.transport.server._base import (
+ Authenticator,
+ BaseFederationServlet,
+)
+from synapse.handlers.groups_local import GroupsLocalHandler
+from synapse.server import HomeServer
+from synapse.types import JsonDict, get_domain_from_id
+from synapse.util.ratelimitutils import FederationRateLimiter
+
+
+class BaseGroupsLocalServlet(BaseFederationServlet):
+ """Abstract base class for federation servlet classes which provides a groups local handler.
+
+ See BaseFederationServlet for more information.
+ """
+
+ def __init__(
+ self,
+ hs: HomeServer,
+ authenticator: Authenticator,
+ ratelimiter: FederationRateLimiter,
+ server_name: str,
+ ):
+ super().__init__(hs, authenticator, ratelimiter, server_name)
+ self.handler = hs.get_groups_local_handler()
+
+
+class FederationGroupsLocalInviteServlet(BaseGroupsLocalServlet):
+ """A group server has invited a local user"""
+
+ PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ if get_domain_from_id(group_id) != origin:
+ raise SynapseError(403, "group_id doesn't match origin")
+
+ assert isinstance(
+ self.handler, GroupsLocalHandler
+ ), "Workers cannot handle group invites."
+
+ new_content = await self.handler.on_invite(group_id, user_id, content)
+
+ return 200, new_content
+
+
+class FederationGroupsRemoveLocalUserServlet(BaseGroupsLocalServlet):
+ """A group server has removed a local user"""
+
+ PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, None]:
+ if get_domain_from_id(group_id) != origin:
+ raise SynapseError(403, "user_id doesn't match origin")
+
+ assert isinstance(
+ self.handler, GroupsLocalHandler
+ ), "Workers cannot handle group removals."
+
+ await self.handler.user_removed_from_group(group_id, user_id, content)
+
+ return 200, None
+
+
+class FederationGroupsBulkPublicisedServlet(BaseGroupsLocalServlet):
+ """Get roles in a group"""
+
+ PATH = "/get_groups_publicised"
+
+ async def on_POST(
+ self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
+ resp = await self.handler.bulk_get_publicised_groups(
+ content["user_ids"], proxy=False
+ )
+
+ return 200, resp
+
+
+GROUP_LOCAL_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
+ FederationGroupsLocalInviteServlet,
+ FederationGroupsRemoveLocalUserServlet,
+ FederationGroupsBulkPublicisedServlet,
+)
diff --git a/synapse/federation/transport/server/groups_server.py b/synapse/federation/transport/server/groups_server.py
new file mode 100644
index 0000000000..b30e92a5eb
--- /dev/null
+++ b/synapse/federation/transport/server/groups_server.py
@@ -0,0 +1,753 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Dict, List, Tuple, Type
+
+from typing_extensions import Literal
+
+from synapse.api.constants import MAX_GROUP_CATEGORYID_LENGTH, MAX_GROUP_ROLEID_LENGTH
+from synapse.api.errors import Codes, SynapseError
+from synapse.federation.transport.server._base import (
+ Authenticator,
+ BaseFederationServlet,
+)
+from synapse.http.servlet import parse_string_from_args
+from synapse.server import HomeServer
+from synapse.types import JsonDict, get_domain_from_id
+from synapse.util.ratelimitutils import FederationRateLimiter
+
+
+class BaseGroupsServerServlet(BaseFederationServlet):
+ """Abstract base class for federation servlet classes which provides a groups server handler.
+
+ See BaseFederationServlet for more information.
+ """
+
+ def __init__(
+ self,
+ hs: HomeServer,
+ authenticator: Authenticator,
+ ratelimiter: FederationRateLimiter,
+ server_name: str,
+ ):
+ super().__init__(hs, authenticator, ratelimiter, server_name)
+ self.handler = hs.get_groups_server_handler()
+
+
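+# Each servlet below authenticates the acting user by checking that the
+# `requester_user_id` query parameter belongs to the origin server. A shared
+# helper along these lines (hypothetical, shown for illustration only) could
+# factor out the repetition:
+#
+#     def _parse_requester_user_id(self, query, origin):
+#         requester_user_id = parse_string_from_args(
+#             query, "requester_user_id", required=True
+#         )
+#         if get_domain_from_id(requester_user_id) != origin:
+#             raise SynapseError(403, "requester_user_id doesn't match origin")
+#         return requester_user_id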
+class FederationGroupsProfileServlet(BaseGroupsServerServlet):
+ """Get/set the basic profile of a group on behalf of a user"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/profile"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ new_content = await self.handler.get_group_profile(group_id, requester_user_id)
+
+ return 200, new_content
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ new_content = await self.handler.update_group_profile(
+ group_id, requester_user_id, content
+ )
+
+ return 200, new_content
+
+
+class FederationGroupsSummaryServlet(BaseGroupsServerServlet):
+ PATH = "/groups/(?P<group_id>[^/]*)/summary"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ new_content = await self.handler.get_group_summary(group_id, requester_user_id)
+
+ return 200, new_content
+
+
+class FederationGroupsRoomsServlet(BaseGroupsServerServlet):
+ """Get the rooms in a group on behalf of a user"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/rooms"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ new_content = await self.handler.get_rooms_in_group(group_id, requester_user_id)
+
+ return 200, new_content
+
+
+class FederationGroupsAddRoomsServlet(BaseGroupsServerServlet):
+ """Add/remove room from group"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ new_content = await self.handler.add_room_to_group(
+ group_id, requester_user_id, room_id, content
+ )
+
+ return 200, new_content
+
+ async def on_DELETE(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ new_content = await self.handler.remove_room_from_group(
+ group_id, requester_user_id, room_id
+ )
+
+ return 200, new_content
+
+
+class FederationGroupsAddRoomsConfigServlet(BaseGroupsServerServlet):
+ """Update room config in group"""
+
+ PATH = (
+ "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
+ "/config/(?P<config_key>[^/]*)"
+ )
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ room_id: str,
+ config_key: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ result = await self.handler.update_room_in_group(
+ group_id, requester_user_id, room_id, config_key, content
+ )
+
+ return 200, result
+
+
+class FederationGroupsUsersServlet(BaseGroupsServerServlet):
+ """Get the users in a group on behalf of a user"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/users"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ new_content = await self.handler.get_users_in_group(group_id, requester_user_id)
+
+ return 200, new_content
+
+
+class FederationGroupsInvitedUsersServlet(BaseGroupsServerServlet):
+ """Get the users that have been invited to a group"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/invited_users"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ new_content = await self.handler.get_invited_users_in_group(
+ group_id, requester_user_id
+ )
+
+ return 200, new_content
+
+
+class FederationGroupsInviteServlet(BaseGroupsServerServlet):
+ """Ask a group server to invite someone to the group"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ new_content = await self.handler.invite_to_group(
+ group_id, user_id, requester_user_id, content
+ )
+
+ return 200, new_content
+
+
+class FederationGroupsAcceptInviteServlet(BaseGroupsServerServlet):
+ """Accept an invitation from the group server"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ if get_domain_from_id(user_id) != origin:
+ raise SynapseError(403, "user_id doesn't match origin")
+
+ new_content = await self.handler.accept_invite(group_id, user_id, content)
+
+ return 200, new_content
+
+
+class FederationGroupsJoinServlet(BaseGroupsServerServlet):
+ """Attempt to join a group"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ if get_domain_from_id(user_id) != origin:
+ raise SynapseError(403, "user_id doesn't match origin")
+
+ new_content = await self.handler.join_group(group_id, user_id, content)
+
+ return 200, new_content
+
+
+class FederationGroupsRemoveUserServlet(BaseGroupsServerServlet):
+ """Leave or kick a user from the group"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ new_content = await self.handler.remove_user_from_group(
+ group_id, user_id, requester_user_id, content
+ )
+
+ return 200, new_content
+
+
+class FederationGroupsSummaryRoomsServlet(BaseGroupsServerServlet):
+ """Add/remove a room from the group summary, with optional category.
+
+ Matches both:
+ - /groups/:group/summary/rooms/:room_id
+ - /groups/:group/summary/categories/:category/rooms/:room_id
+ """
+
+ PATH = (
+ "/groups/(?P<group_id>[^/]*)/summary"
+ "(/categories/(?P<category_id>[^/]+))?"
+ "/rooms/(?P<room_id>[^/]*)"
+ )
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ category_id: str,
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ if category_id == "":
+ raise SynapseError(
+ 400, "category_id cannot be empty string", Codes.INVALID_PARAM
+ )
+
+ if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
+ raise SynapseError(
+ 400,
+ "category_id may not be longer than %s characters"
+ % (MAX_GROUP_CATEGORYID_LENGTH,),
+ Codes.INVALID_PARAM,
+ )
+
+ resp = await self.handler.update_group_summary_room(
+ group_id,
+ requester_user_id,
+ room_id=room_id,
+ category_id=category_id,
+ content=content,
+ )
+
+ return 200, resp
+
+ async def on_DELETE(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ category_id: str,
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ if category_id == "":
+ raise SynapseError(400, "category_id cannot be empty string")
+
+ resp = await self.handler.delete_group_summary_room(
+ group_id, requester_user_id, room_id=room_id, category_id=category_id
+ )
+
+ return 200, resp
+
+
+class FederationGroupsCategoriesServlet(BaseGroupsServerServlet):
+ """Get all categories for a group"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/categories/?"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ resp = await self.handler.get_group_categories(group_id, requester_user_id)
+
+ return 200, resp
+
+
+class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
+ """Add/remove/get a category in a group"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ category_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ resp = await self.handler.get_group_category(
+ group_id, requester_user_id, category_id
+ )
+
+ return 200, resp
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ category_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ if category_id == "":
+ raise SynapseError(400, "category_id cannot be empty string")
+
+ if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
+ raise SynapseError(
+ 400,
+ "category_id may not be longer than %s characters"
+ % (MAX_GROUP_CATEGORYID_LENGTH,),
+ Codes.INVALID_PARAM,
+ )
+
+ resp = await self.handler.upsert_group_category(
+ group_id, requester_user_id, category_id, content
+ )
+
+ return 200, resp
+
+ async def on_DELETE(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ category_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ if category_id == "":
+ raise SynapseError(400, "category_id cannot be empty string")
+
+ resp = await self.handler.delete_group_category(
+ group_id, requester_user_id, category_id
+ )
+
+ return 200, resp
+
+
+class FederationGroupsRolesServlet(BaseGroupsServerServlet):
+ """Get roles in a group"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/roles/?"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ resp = await self.handler.get_group_roles(group_id, requester_user_id)
+
+ return 200, resp
+
+
+class FederationGroupsRoleServlet(BaseGroupsServerServlet):
+ """Add/remove/get a role in a group"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ role_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ resp = await self.handler.get_group_role(group_id, requester_user_id, role_id)
+
+ return 200, resp
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ role_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ if role_id == "":
+ raise SynapseError(
+ 400, "role_id cannot be empty string", Codes.INVALID_PARAM
+ )
+
+ if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
+ raise SynapseError(
+ 400,
+ "role_id may not be longer than %s characters"
+ % (MAX_GROUP_ROLEID_LENGTH,),
+ Codes.INVALID_PARAM,
+ )
+
+ resp = await self.handler.update_group_role(
+ group_id, requester_user_id, role_id, content
+ )
+
+ return 200, resp
+
+ async def on_DELETE(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ role_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ if role_id == "":
+ raise SynapseError(400, "role_id cannot be empty string")
+
+ resp = await self.handler.delete_group_role(
+ group_id, requester_user_id, role_id
+ )
+
+ return 200, resp
+
+
+class FederationGroupsSummaryUsersServlet(BaseGroupsServerServlet):
+ """Add/remove a user from the group summary, with optional role.
+
+ Matches both:
+ - /groups/:group/summary/users/:user_id
+ - /groups/:group/summary/roles/:role/users/:user_id
+ """
+
+ PATH = (
+ "/groups/(?P<group_id>[^/]*)/summary"
+ "(/roles/(?P<role_id>[^/]+))?"
+ "/users/(?P<user_id>[^/]*)"
+ )
+
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ role_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ if role_id == "":
+ raise SynapseError(400, "role_id cannot be empty string")
+
+ if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
+ raise SynapseError(
+ 400,
+ "role_id may not be longer than %s characters"
+ % (MAX_GROUP_ROLEID_LENGTH,),
+ Codes.INVALID_PARAM,
+ )
+
+ resp = await self.handler.update_group_summary_user(
+ group_id,
+ requester_user_id,
+ user_id=user_id,
+ role_id=role_id,
+ content=content,
+ )
+
+ return 200, resp
+
+ async def on_DELETE(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ role_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ if role_id == "":
+ raise SynapseError(400, "role_id cannot be empty string")
+
+ resp = await self.handler.delete_group_summary_user(
+ group_id, requester_user_id, user_id=user_id, role_id=role_id
+ )
+
+ return 200, resp
+
+
+class FederationGroupsSettingJoinPolicyServlet(BaseGroupsServerServlet):
+ """Sets whether a group is joinable without an invite or knock"""
+
+ PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy"
+
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester_user_id = parse_string_from_args(
+ query, "requester_user_id", required=True
+ )
+ if get_domain_from_id(requester_user_id) != origin:
+ raise SynapseError(403, "requester_user_id doesn't match origin")
+
+ new_content = await self.handler.set_group_join_policy(
+ group_id, requester_user_id, content
+ )
+
+ return 200, new_content
+
+
+GROUP_SERVER_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
+ FederationGroupsProfileServlet,
+ FederationGroupsSummaryServlet,
+ FederationGroupsRoomsServlet,
+ FederationGroupsUsersServlet,
+ FederationGroupsInvitedUsersServlet,
+ FederationGroupsInviteServlet,
+ FederationGroupsAcceptInviteServlet,
+ FederationGroupsJoinServlet,
+ FederationGroupsRemoveUserServlet,
+ FederationGroupsSummaryRoomsServlet,
+ FederationGroupsCategoriesServlet,
+ FederationGroupsCategoryServlet,
+ FederationGroupsRolesServlet,
+ FederationGroupsRoleServlet,
+ FederationGroupsSummaryUsersServlet,
+ FederationGroupsAddRoomsServlet,
+ FederationGroupsAddRoomsConfigServlet,
+ FederationGroupsSettingJoinPolicyServlet,
+)
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
index c83a261918..b9b12fbea5 100644
--- a/synapse/federation/units.py
+++ b/synapse/federation/units.py
@@ -17,18 +17,17 @@ server protocol.
"""
import logging
-from typing import Optional
+from typing import List, Optional
import attr
from synapse.types import JsonDict
-from synapse.util.jsonobject import JsonEncodedObject
logger = logging.getLogger(__name__)
-@attr.s(slots=True)
-class Edu(JsonEncodedObject):
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class Edu:
"""An Edu represents a piece of data sent from one homeserver to another.
In comparison to Pdus, Edus are not persisted for a long time on disk, are
@@ -36,10 +35,10 @@ class Edu(JsonEncodedObject):
internal ID or previous references graph.
"""
- edu_type = attr.ib(type=str)
- content = attr.ib(type=dict)
- origin = attr.ib(type=str)
- destination = attr.ib(type=str)
+ edu_type: str
+ content: dict
+ origin: str
+ destination: str
def get_dict(self) -> JsonDict:
return {
@@ -55,14 +54,21 @@ class Edu(JsonEncodedObject):
"destination": self.destination,
}
- def get_context(self):
+ def get_context(self) -> str:
return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}")
- def strip_context(self):
+ def strip_context(self) -> None:
getattr(self, "content", {})["org.matrix.opentracing_context"] = "{}"
-class Transaction(JsonEncodedObject):
+def _none_to_list(edus: Optional[List[JsonDict]]) -> List[JsonDict]:
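+ """attrs converter: treat a missing (None) list of PDUs/EDUs as empty."""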
+ if edus is None:
+ return []
+ return edus
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class Transaction:
"""A transaction is a list of Pdus and Edus to be sent to a remote home
server with some extra metadata.
@@ -78,47 +84,21 @@ class Transaction(JsonEncodedObject):
"""
- valid_keys = [
- "transaction_id",
- "origin",
- "destination",
- "origin_server_ts",
- "previous_ids",
- "pdus",
- "edus",
- ]
-
- internal_keys = ["transaction_id", "destination"]
-
- required_keys = [
- "transaction_id",
- "origin",
- "destination",
- "origin_server_ts",
- "pdus",
- ]
-
- def __init__(self, transaction_id=None, pdus: Optional[list] = None, **kwargs):
- """If we include a list of pdus then we decode then as PDU's
- automatically.
- """
-
- # If there's no EDUs then remove the arg
- if "edus" in kwargs and not kwargs["edus"]:
- del kwargs["edus"]
-
- super().__init__(transaction_id=transaction_id, pdus=pdus or [], **kwargs)
-
- @staticmethod
- def create_new(pdus, **kwargs):
- """Used to create a new transaction. Will auto fill out
- transaction_id and origin_server_ts keys.
- """
- if "origin_server_ts" not in kwargs:
- raise KeyError("Require 'origin_server_ts' to construct a Transaction")
- if "transaction_id" not in kwargs:
- raise KeyError("Require 'transaction_id' to construct a Transaction")
-
- kwargs["pdus"] = [p.get_pdu_json() for p in pdus]
-
- return Transaction(**kwargs)
+ # Required keys.
+ transaction_id: str
+ origin: str
+ destination: str
+ origin_server_ts: int
+ pdus: List[JsonDict] = attr.ib(factory=list, converter=_none_to_list)
+ edus: List[JsonDict] = attr.ib(factory=list, converter=_none_to_list)
+
+ def get_dict(self) -> JsonDict:
+ """A JSON-ready dictionary of valid keys which aren't internal."""
+ result = {
+ "origin": self.origin,
+ "origin_server_ts": self.origin_server_ts,
+ "pdus": self.pdus,
+ }
+ if self.edus:
+ result["edus"] = self.edus
+ return result
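+
+
+# Illustrative usage (names are hypothetical): building a transaction for
+# sending, then serialising it for the wire:
+#
+#     txn = Transaction(
+#         transaction_id="916d630ea616342b42e98a3be0b74113",
+#         origin="origin.example.org",
+#         destination="remote.example.org",
+#         origin_server_ts=1630000000000,
+#         pdus=[p.get_pdu_json() for p in pdus_to_send],
+#     )
+#     payload = txn.get_dict()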
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index 3dc55ab861..d6b75ac27f 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -332,6 +332,13 @@ class GroupsServerWorkerHandler:
requester_user_id, group_id
)
+ # Note! room_results["is_public"] is about whether the room is considered
+ # public from the group's point of view. (i.e. whether non-group members
+ # should be able to see the room is in the group).
+ # This is not the same as whether the room itself is public (in the sense
+ # of being visible in the room directory).
+ # As such, room_results["is_public"] itself is not sufficient to determine
+ # whether any given user is permitted to see the room's metadata.
room_results = await self.store.get_rooms_in_group(
group_id, include_private=is_user_in_group
)
@@ -341,8 +348,15 @@ class GroupsServerWorkerHandler:
room_id = room_result["room_id"]
joined_users = await self.store.get_users_in_room(room_id)
+
+ # check the user is actually allowed to see the room before showing it to them
+ allow_private = requester_user_id in joined_users
+
entry = await self.room_list_handler.generate_room_entry(
- room_id, len(joined_users), with_alias=False, allow_private=True
+ room_id,
+ len(joined_users),
+ with_alias=False,
+ allow_private=allow_private,
)
if not entry:
@@ -354,7 +368,7 @@ class GroupsServerWorkerHandler:
chunk.sort(key=lambda e: -e["num_joined_members"])
- return {"chunk": chunk, "total_room_count_estimate": len(room_results)}
+ return {"chunk": chunk, "total_room_count_estimate": len(chunk)}
class GroupsServerHandler(GroupsServerWorkerHandler):
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index 6a05a65305..955cfa2207 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -15,10 +15,7 @@
import logging
from typing import TYPE_CHECKING, Optional
-import synapse.types
-from synapse.api.constants import EventTypes, Membership
from synapse.api.ratelimiting import Ratelimiter
-from synapse.types import UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -115,68 +112,3 @@ class BaseHandler:
burst_count=burst_count,
update=update,
)
-
- async def maybe_kick_guest_users(self, event, context=None):
- # Technically this function invalidates current_state by changing it.
- # Hopefully this isn't that important to the caller.
- if event.type == EventTypes.GuestAccess:
- guest_access = event.content.get("guest_access", "forbidden")
- if guest_access != "can_join":
- if context:
- current_state_ids = await context.get_current_state_ids()
- current_state_dict = await self.store.get_events(
- list(current_state_ids.values())
- )
- current_state = list(current_state_dict.values())
- else:
- current_state_map = await self.state_handler.get_current_state(
- event.room_id
- )
- current_state = list(current_state_map.values())
-
- logger.info("maybe_kick_guest_users %r", current_state)
- await self.kick_guest_users(current_state)
-
- async def kick_guest_users(self, current_state):
- for member_event in current_state:
- try:
- if member_event.type != EventTypes.Member:
- continue
-
- target_user = UserID.from_string(member_event.state_key)
- if not self.hs.is_mine(target_user):
- continue
-
- if member_event.content["membership"] not in {
- Membership.JOIN,
- Membership.INVITE,
- }:
- continue
-
- if (
- "kind" not in member_event.content
- or member_event.content["kind"] != "guest"
- ):
- continue
-
- # We make the user choose to leave, rather than have the
- # event-sender kick them. This is partially because we don't
- # need to worry about power levels, and partially because guest
- # users are a concept which doesn't hugely work over federation,
- # and having homeservers have their own users leave keeps more
- # of that decision-making and control local to the guest-having
- # homeserver.
- requester = synapse.types.create_requester(
- target_user, is_guest=True, authenticated_entity=self.server_name
- )
- handler = self.hs.get_room_member_handler()
- await handler.update_membership(
- requester,
- target_user,
- member_event.room_id,
- "leave",
- ratelimit=False,
- require_consent=False,
- )
- except Exception as e:
- logger.exception("Error kicking guest user: %s" % (e,))
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 21a17cd2e8..4ab4046650 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -392,9 +392,6 @@ class ApplicationServicesHandler:
protocols[p].append(info)
def _merge_instances(infos: List[JsonDict]) -> JsonDict:
- if not infos:
- return {}
-
# Merge the 'instances' lists of multiple results, but just take
# the other fields from the first as they ought to be identical
# copy the result so as not to corrupt the cached one
@@ -406,7 +403,9 @@ class ApplicationServicesHandler:
return combined
- return {p: _merge_instances(protocols[p]) for p in protocols.keys()}
+ return {
+ p: _merge_instances(protocols[p]) for p in protocols.keys() if protocols[p]
+ }
async def _get_services_for_event(
self, event: EventBase
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 22a8552241..34725324a6 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -73,7 +73,7 @@ from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email
if TYPE_CHECKING:
- from synapse.rest.client.v1.login import LoginResponse
+ from synapse.rest.client.login import LoginResponse
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -461,7 +461,7 @@ class AuthHandler(BaseHandler):
If no auth flows have been completed successfully, raises an
InteractiveAuthIncompleteError. To handle this, you can use
- synapse.rest.client.v2_alpha._base.interactive_auth_handler as a
+ synapse.rest.client._base.interactive_auth_handler as a
decorator.
Args:
@@ -543,7 +543,7 @@ class AuthHandler(BaseHandler):
# Note that the registration endpoint explicitly removes the
# "initial_device_display_name" parameter if it is provided
# without a "password" parameter. See the changes to
- # synapse.rest.client.v2_alpha.register.RegisterRestServlet.on_POST
+ # synapse.rest.client.register.RegisterRestServlet.on_POST
# in commit 544722bad23fc31056b9240189c3cbbbf0ffd3f9.
if not clientdict:
clientdict = session.clientdict
@@ -627,23 +627,28 @@ class AuthHandler(BaseHandler):
async def add_oob_auth(
self, stagetype: str, authdict: Dict[str, Any], clientip: str
- ) -> bool:
+ ) -> None:
"""
Adds the result of out-of-band authentication into an existing auth
session. Currently used for adding the result of fallback auth.
+
+ Raises:
+ LoginError: if the stagetype is unknown, if the session is missing,
+ or (via check_auth) if authentication fails.
"""
if stagetype not in self.checkers:
- raise LoginError(400, "", Codes.MISSING_PARAM)
+ raise LoginError(
+ 400, f"Unknown UIA stage type: {stagetype}", Codes.INVALID_PARAM
+ )
if "session" not in authdict:
- raise LoginError(400, "", Codes.MISSING_PARAM)
+ raise LoginError(400, "Missing session ID", Codes.MISSING_PARAM)
+ # If authentication fails a LoginError is raised. Otherwise, store
+ # the successful result.
result = await self.checkers[stagetype].check_auth(authdict, clientip)
- if result:
- await self.store.mark_ui_auth_stage_complete(
- authdict["session"], stagetype, result
- )
- return True
- return False
+ await self.store.mark_ui_auth_stage_complete(
+ authdict["session"], stagetype, result
+ )
def get_session_id(self, clientdict: Dict[str, Any]) -> Optional[str]:
"""
@@ -1459,6 +1464,10 @@ class AuthHandler(BaseHandler):
)
await self.store.user_delete_threepid(user_id, medium, address)
+ if medium == "email":
+ await self.store.delete_pusher_by_app_id_pushkey_user_id(
+ app_id="m.email", pushkey=address, user_id=user_id
+ )
return result
async def hash(self, password: str) -> str:
@@ -1727,7 +1736,6 @@ class AuthHandler(BaseHandler):
@attr.s(slots=True)
class MacaroonGenerator:
-
hs = attr.ib()
def generate_guest_access_token(self, user_id: str) -> str:
diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py
index 53fac1f8a3..4288ffff09 100644
--- a/synapse/handlers/event_auth.py
+++ b/synapse/handlers/event_auth.py
@@ -213,7 +213,7 @@ class EventAuthHandler:
raise AuthError(
403,
- "You do not belong to any of the required rooms to join this room.",
+ "You do not belong to any of the required rooms/spaces to join this room.",
)
async def has_restricted_join_rules(
@@ -240,7 +240,7 @@ class EventAuthHandler:
# If the join rule is not restricted, this doesn't apply.
join_rules_event = await self._store.get_event(join_rules_event_id)
- return join_rules_event.content.get("join_rule") == JoinRules.MSC3083_RESTRICTED
+ return join_rules_event.content.get("join_rule") == JoinRules.RESTRICTED
async def get_rooms_that_allow_join(
self, state_ids: StateMap[str]
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index b4d4759c61..bc17b45b27 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -17,23 +17,9 @@
import itertools
import logging
-from collections.abc import Container
from http import HTTPStatus
-from typing import (
- TYPE_CHECKING,
- Collection,
- Dict,
- Iterable,
- List,
- Optional,
- Sequence,
- Set,
- Tuple,
- Union,
-)
+from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union
-import attr
-from prometheus_client import Counter
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
@@ -41,18 +27,12 @@ from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse import event_auth
-from synapse.api.constants import (
- EventTypes,
- Membership,
- RejectedReason,
- RoomEncryptionAlgorithms,
-)
+from synapse.api.constants import EventTypes, Membership, RejectedReason
from synapse.api.errors import (
AuthError,
CodeMessageException,
Codes,
FederationDeniedError,
- FederationError,
HttpResponseException,
NotFoundError,
RequestSendFailed,
@@ -60,10 +40,10 @@ from synapse.api.errors import (
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
from synapse.crypto.event_signing import compute_event_signature
-from synapse.event_auth import auth_types_for_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.validator import EventValidator
+from synapse.federation.federation_client import InvalidResponseError
from synapse.handlers._base import BaseHandler
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
@@ -74,28 +54,14 @@ from synapse.logging.context import (
)
from synapse.logging.opentracing import start_active_span
from synapse.logging.utils import log_function
-from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.replication.http.federation import (
ReplicationCleanRoomRestServlet,
- ReplicationFederationSendEventsRestServlet,
ReplicationStoreRoomOnOutlierMembershipRestServlet,
)
-from synapse.state import StateResolutionStore
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
-from synapse.types import (
- JsonDict,
- MutableStateMap,
- PersistedEventPosition,
- RoomStreamToken,
- StateMap,
- UserID,
- get_domain_from_id,
-)
-from synapse.util.async_helpers import Linearizer, concurrently_execute
-from synapse.util.iterutils import batch_iter
+from synapse.types import JsonDict, StateMap, get_domain_from_id
+from synapse.util.async_helpers import Linearizer
from synapse.util.retryutils import NotRetryingDestination
-from synapse.util.stringutils import shortstr
from synapse.visibility import filter_events_for_server
if TYPE_CHECKING:
@@ -103,38 +69,11 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-soft_failed_event_counter = Counter(
- "synapse_federation_soft_failed_events_total",
- "Events received over federation that we marked as soft_failed",
-)
-
-
-@attr.s(slots=True)
-class _NewEventInfo:
- """Holds information about a received event, ready for passing to _auth_and_persist_events
-
- Attributes:
- event: the received event
-
- state: the state at that event
-
- auth_events: the auth_event map for that event
- """
-
- event = attr.ib(type=EventBase)
- state = attr.ib(type=Optional[Sequence[EventBase]], default=None)
- auth_events = attr.ib(type=Optional[MutableStateMap[EventBase]], default=None)
-
class FederationHandler(BaseHandler):
- """Handles events that originated from federation.
- Responsible for:
- a) handling received Pdus before handing them on as Events to the rest
- of the homeserver (including auth and state conflict resolutions)
- b) converting events that were produced by local clients that may need
- to be sent to remote homeservers.
- c) doing the necessary dances to invite remote users and join remote
- rooms.
+ """Handles general incoming federation requests
+
+ Incoming events are *not* handled here; see FederationEventHandler for those.
"""
def __init__(self, hs: "HomeServer"):
@@ -147,896 +86,35 @@ class FederationHandler(BaseHandler):
self.state_store = self.storage.state
self.federation_client = hs.get_federation_client()
self.state_handler = hs.get_state_handler()
- self._state_resolution_handler = hs.get_state_resolution_handler()
self.server_name = hs.hostname
self.keyring = hs.get_keyring()
- self.action_generator = hs.get_action_generator()
self.is_mine_id = hs.is_mine_id
self.spam_checker = hs.get_spam_checker()
self.event_creation_handler = hs.get_event_creation_handler()
self._event_auth_handler = hs.get_event_auth_handler()
- self._message_handler = hs.get_message_handler()
self._server_notices_mxid = hs.config.server_notices_mxid
self.config = hs.config
self.http_client = hs.get_proxied_blacklisted_http_client()
- self._instance_name = hs.get_instance_name()
self._replication = hs.get_replication_data_handler()
+ self._federation_event_handler = hs.get_federation_event_handler()
- self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs)
self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client(
hs
)
if hs.config.worker_app:
- self._user_device_resync = (
- ReplicationUserDevicesResyncRestServlet.make_client(hs)
- )
self._maybe_store_room_on_outlier_membership = (
ReplicationStoreRoomOnOutlierMembershipRestServlet.make_client(hs)
)
else:
- self._device_list_updater = hs.get_device_handler().device_list_updater
self._maybe_store_room_on_outlier_membership = (
self.store.maybe_store_room_on_outlier_membership
)
- # When joining a room we need to queue any events for that room up.
- # For each room, a list of (pdu, origin) tuples.
- self.room_queues: Dict[str, List[Tuple[EventBase, str]]] = {}
- self._room_pdu_linearizer = Linearizer("fed_room_pdu")
-
self._room_backfill = Linearizer("room_backfill")
self.third_party_event_rules = hs.get_third_party_event_rules()
- self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
-
- async def on_receive_pdu(
- self, origin: str, pdu: EventBase, sent_to_us_directly: bool = False
- ) -> None:
- """Process a PDU received via a federation /send/ transaction, or
- via backfill of missing prev_events
-
- Args:
- origin: server which initiated the /send/ transaction. Will
- be used to fetch missing events or state.
- pdu: received PDU
- sent_to_us_directly: True if this event was pushed to us; False if
- we pulled it as the result of a missing prev_event.
- """
-
- room_id = pdu.room_id
- event_id = pdu.event_id
-
- logger.info("handling received PDU: %s", pdu)
-
- # We reprocess pdus when we have seen them only as outliers
- existing = await self.store.get_event(
- event_id, allow_none=True, allow_rejected=True
- )
-
- # FIXME: Currently we fetch an event again when we already have it
- # if it has been marked as an outlier.
-
- already_seen = existing and (
- not existing.internal_metadata.is_outlier()
- or pdu.internal_metadata.is_outlier()
- )
- if already_seen:
- logger.debug("Already seen pdu")
- return
-
- # do some initial sanity-checking of the event. In particular, make
- # sure it doesn't have hundreds of prev_events or auth_events, which
- # could cause a huge state resolution or cascade of event fetches.
- try:
- self._sanity_check_event(pdu)
- except SynapseError as err:
- logger.warning("Received event failed sanity checks")
- raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id)
-
- # If we are currently in the process of joining this room, then we
- # queue up events for later processing.
- if room_id in self.room_queues:
- logger.info(
- "Queuing PDU from %s for now: join in progress",
- origin,
- )
- self.room_queues[room_id].append((pdu, origin))
- return
-
- # If we're not in the room just ditch the event entirely. This is
- # probably an old server that has come back and thinks we're still in
- # the room (or we've been rejoined to the room by a state reset).
- #
- # Note that if we were never in the room then we would have already
- # dropped the event, since we wouldn't know the room version.
- is_in_room = await self._event_auth_handler.check_host_in_room(
- room_id, self.server_name
- )
- if not is_in_room:
- logger.info(
- "Ignoring PDU from %s as we're not in the room",
- origin,
- )
- return None
-
- state = None
-
- # Get missing pdus if necessary.
- if not pdu.internal_metadata.is_outlier():
- # We only backfill backwards to the min depth.
- min_depth = await self.get_min_depth_for_context(pdu.room_id)
-
- logger.debug("min_depth: %d", min_depth)
-
- prevs = set(pdu.prev_event_ids())
- seen = await self.store.have_events_in_timeline(prevs)
-
- if min_depth is not None and pdu.depth < min_depth:
- # This is so that we don't notify the user about this
- # message, to work around the fact that some events will
- # reference very old events that we don't want to send to
- # the clients.
- pdu.internal_metadata.outlier = True
- elif min_depth is not None and pdu.depth > min_depth:
- missing_prevs = prevs - seen
- if sent_to_us_directly and missing_prevs:
- # If we're missing stuff, ensure we only fetch stuff one
- # at a time.
- logger.info(
- "Acquiring room lock to fetch %d missing prev_events: %s",
- len(missing_prevs),
- shortstr(missing_prevs),
- )
- with (await self._room_pdu_linearizer.queue(pdu.room_id)):
- logger.info(
- "Acquired room lock to fetch %d missing prev_events",
- len(missing_prevs),
- )
-
- try:
- await self._get_missing_events_for_pdu(
- origin, pdu, prevs, min_depth
- )
- except Exception as e:
- raise Exception(
- "Error fetching missing prev_events for %s: %s"
- % (event_id, e)
- ) from e
-
- # Update the set of things we've seen after trying to
- # fetch the missing stuff
- seen = await self.store.have_events_in_timeline(prevs)
-
- if not prevs - seen:
- logger.info(
- "Found all missing prev_events",
- )
-
- if prevs - seen:
- # We've still not been able to get all of the prev_events for this event.
- #
- # In this case, we need to fall back to asking another server in the
- # federation for the state at this event. That's ok provided we then
- # resolve the state against other bits of the DAG before using it (which
- # will ensure that you can't just take over a room by sending an event,
- # withholding its prev_events, and declaring yourself to be an admin in
- # the subsequent state request).
- #
- # Now, if we're pulling this event as a missing prev_event, then clearly
- # this event is not going to become the only forward-extremity and we are
- # guaranteed to resolve its state against our existing forward
- # extremities, so that should be fine.
- #
- # On the other hand, if this event was pushed to us, it is possible for
- # it to become the only forward-extremity in the room, and we would then
- # trust its state to be the state for the whole room. This is very bad.
- # Further, if the event was pushed to us, there is no excuse for us not to
- # have all the prev_events. We therefore reject any such events.
- #
- # XXX this really feels like it could/should be merged with the above,
- # but there is an interaction with min_depth that I'm not really
- # following.
-
- if sent_to_us_directly:
- logger.warning(
- "Rejecting: failed to fetch %d prev events: %s",
- len(prevs - seen),
- shortstr(prevs - seen),
- )
- raise FederationError(
- "ERROR",
- 403,
- (
- "Your server isn't divulging details about prev_events "
- "referenced in this event."
- ),
- affected=pdu.event_id,
- )
-
- logger.info(
- "Event %s is missing prev_events: calculating state for a "
- "backwards extremity",
- event_id,
- )
-
- # Calculate the state after each of the previous events, and
- # resolve them to find the correct state at the current event.
- event_map = {event_id: pdu}
- try:
- # Get the state of the events we know about
- ours = await self.state_store.get_state_groups_ids(room_id, seen)
-
- # state_maps is a list of mappings from (type, state_key) to event_id
- state_maps: List[StateMap[str]] = list(ours.values())
-
- # we don't need this any more, let's delete it.
- del ours
-
- # Ask the remote server for the states we don't
- # know about
- for p in prevs - seen:
- logger.info("Requesting state after missing prev_event %s", p)
-
- with nested_logging_context(p):
- # note that if any of the missing prevs share missing state or
- # auth events, the requests to fetch those events are deduped
- # by the get_pdu_cache in federation_client.
- remote_state = (
- await self._get_state_after_missing_prev_event(
- origin, room_id, p
- )
- )
-
- remote_state_map = {
- (x.type, x.state_key): x.event_id for x in remote_state
- }
- state_maps.append(remote_state_map)
-
- for x in remote_state:
- event_map[x.event_id] = x
-
- room_version = await self.store.get_room_version_id(room_id)
- state_map = (
- await self._state_resolution_handler.resolve_events_with_store(
- room_id,
- room_version,
- state_maps,
- event_map,
- state_res_store=StateResolutionStore(self.store),
- )
- )
-
- # We need to give _process_received_pdu the actual state events
- # rather than event ids, so generate that now.
-
- # First though we need to fetch all the events that are in
- # state_map, so we can build up the state below.
- evs = await self.store.get_events(
- list(state_map.values()),
- get_prev_content=False,
- redact_behaviour=EventRedactBehaviour.AS_IS,
- )
- event_map.update(evs)
-
- state = [event_map[e] for e in state_map.values()]
- except Exception:
- logger.warning(
- "Error attempting to resolve state at missing " "prev_events",
- exc_info=True,
- )
- raise FederationError(
- "ERROR",
- 403,
- "We can't get valid state history.",
- affected=event_id,
- )
-
- await self._process_received_pdu(origin, pdu, state=state)
-
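The removed `on_receive_pdu` opens with a subtle dedupe rule: an event previously stored only as an outlier must be reprocessed when it later arrives as a full event. Restated as a pure predicate with a small truth table (the names here are illustrative, not Synapse's):

```python
def already_seen(have_existing: bool, existing_is_outlier: bool,
                 incoming_is_outlier: bool) -> bool:
    # Skip reprocessing only if we already have the event AND our stored copy
    # is at least as "good" as the incoming one: a de-outliered copy always
    # suffices, while an outlier copy only covers an incoming outlier.
    return have_existing and (not existing_is_outlier or incoming_is_outlier)


assert already_seen(True, False, False)       # full copy stored: skip
assert already_seen(True, True, True)         # outlier vs outlier: skip
assert not already_seen(True, True, False)    # outlier stored, full arrives: reprocess
assert not already_seen(False, False, False)  # never seen: process
```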
- async def _get_missing_events_for_pdu(
- self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int
- ) -> None:
- """
- Args:
- origin: Origin of the pdu. Will be called to get the missing events
- pdu: received pdu
- prevs: List of event ids which we are missing
- min_depth: Minimum depth of events to return.
- """
-
- room_id = pdu.room_id
- event_id = pdu.event_id
-
- seen = await self.store.have_events_in_timeline(prevs)
-
- if not prevs - seen:
- return
-
- latest_list = await self.store.get_latest_event_ids_in_room(room_id)
-
- # We add the prev events that we have seen to the latest
- # list to ensure the remote server doesn't give them to us
- latest = set(latest_list)
- latest |= seen
-
- logger.info(
- "Requesting missing events between %s and %s",
- shortstr(latest),
- event_id,
- )
-
- # XXX: we set timeout to 10s to help workaround
- # https://github.com/matrix-org/synapse/issues/1733.
- # The reason is to avoid holding the linearizer lock
- # whilst processing inbound /send transactions, causing
- # FDs to stack up and block other inbound transactions
- # which empirically can currently take up to 30 minutes.
- #
- # N.B. this explicitly disables retry attempts.
- #
- # N.B. this also increases our chances of falling back to
- # fetching fresh state for the room if the missing event
- # can't be found, which slightly reduces our security.
- # it may also increase our DAG extremity count for the room,
- # causing additional state resolution? See #1760.
- # However, fetching state doesn't hold the linearizer lock
- # apparently.
- #
- # see https://github.com/matrix-org/synapse/pull/1744
- #
- # ----
- #
- # Update richvdh 2018/09/18: There are a number of problems with timing this
- # request out aggressively on the client side:
- #
- # - it plays badly with the server-side rate-limiter, which starts tarpitting you
- # if you send too many requests at once, so you end up with the server carefully
- # working through the backlog of your requests, which you have already timed
- # out.
- #
- # - for this request in particular, we now (as of
- # https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the
- # server can't produce a plausible-looking set of prev_events - so we become
- # much more likely to reject the event.
- #
- # - contrary to what it says above, we do *not* fall back to fetching fresh state
- # for the room if get_missing_events times out. Rather, we give up processing
- # the PDU whose prevs we are missing, which then makes it much more likely that
- # we'll end up back here for the *next* PDU in the list, which exacerbates the
- # problem.
- #
- # - the aggressive 10s timeout was introduced to deal with incoming federation
- # requests taking 8 hours to process. It's not entirely clear why that was going
- # on; certainly there were other issues causing traffic storms which are now
- # resolved, and I think in any case we may be more sensible about our locking
- # now. We're *certainly* more sensible about our logging.
- #
- # All that said: Let's try increasing the timeout to 60s and see what happens.
-
- try:
- missing_events = await self.federation_client.get_missing_events(
- origin,
- room_id,
- earliest_events_ids=list(latest),
- latest_events=[pdu],
- limit=10,
- min_depth=min_depth,
- timeout=60000,
- )
- except (RequestSendFailed, HttpResponseException, NotRetryingDestination) as e:
- # We failed to get the missing events, but since we need to handle
- # the case of `get_missing_events` not returning the necessary
- # events anyway, it is safe to simply log the error and continue.
- logger.warning("Failed to get prev_events: %s", e)
- return
-
- logger.info(
- "Got %d prev_events: %s",
- len(missing_events),
- shortstr(missing_events),
- )
-
- # We want to sort these by depth so we process them and
- # tell clients about them in order.
- missing_events.sort(key=lambda x: x.depth)
-
- for ev in missing_events:
- logger.info(
- "Handling received prev_event %s",
- ev.event_id,
- )
- with nested_logging_context(ev.event_id):
- try:
- await self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
- except FederationError as e:
- if e.code == 403:
- logger.warning(
- "Received prev_event %s failed history check.",
- ev.event_id,
- )
- else:
- raise
-
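The removed helper's core trick is advertising everything we already hold as "earliest events", so the remote walks backwards from the PDU and stops as soon as it hits known ground. The set arithmetic, as a worked example with made-up event IDs:

```python
prevs = {"$a", "$b", "$c"}   # prev_events of the received PDU
seen = {"$a"}                # the subset already in our timeline
latest = {"$x", "$y"}        # our current forward extremities

# Add the seen prev_events to the "earliest" set so the remote
# doesn't waste the response resending events we already have.
latest |= seen

missing = prevs - seen
assert missing == {"$b", "$c"}
assert latest == {"$x", "$y", "$a"}
```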
- async def _get_state_for_room(
- self,
- destination: str,
- room_id: str,
- event_id: str,
- ) -> List[EventBase]:
- """Requests all of the room state at a given event from a remote
- homeserver.
-
- Will also fetch any missing events reported in the `auth_chain_ids`
- section of `/state_ids`.
-
- Args:
- destination: The remote homeserver to query for the state.
- room_id: The id of the room we're interested in.
- event_id: The id of the event we want the state at.
-
- Returns:
- A list of events in the state, not including the event itself.
- """
- (
- state_event_ids,
- auth_event_ids,
- ) = await self.federation_client.get_room_state_ids(
- destination, room_id, event_id=event_id
- )
-
- # Fetch the state events from the DB, and check we have the auth events.
- event_map = await self.store.get_events(state_event_ids, allow_rejected=True)
- auth_events_in_store = await self.store.have_seen_events(
- room_id, auth_event_ids
- )
-
- # Check for missing events. We handle state and auth events separately,
- # as we want to pull the state from the DB, but we don't for the auth
- # events. (Note: we likely won't use the majority of the auth chain, and
- # it can be *huge* for large rooms, so it's worth ensuring that we don't
- # unnecessarily pull it from the DB).
- missing_state_events = set(state_event_ids) - set(event_map)
- missing_auth_events = set(auth_event_ids) - set(auth_events_in_store)
- if missing_state_events or missing_auth_events:
- await self._get_events_and_persist(
- destination=destination,
- room_id=room_id,
- events=missing_state_events | missing_auth_events,
- )
-
- if missing_state_events:
- new_events = await self.store.get_events(
- missing_state_events, allow_rejected=True
- )
- event_map.update(new_events)
-
- missing_state_events.difference_update(new_events)
-
- if missing_state_events:
- logger.warning(
- "Failed to fetch missing state events for %s %s",
- event_id,
- missing_state_events,
- )
-
- if missing_auth_events:
- auth_events_in_store = await self.store.have_seen_events(
- room_id, missing_auth_events
- )
- missing_auth_events.difference_update(auth_events_in_store)
-
- if missing_auth_events:
- logger.warning(
- "Failed to fetch missing auth events for %s %s",
- event_id,
- missing_auth_events,
- )
-
- remote_state = list(event_map.values())
-
- # check for events which were in the wrong room.
- #
- # this can happen if a remote server claims that the state or
- # auth_events at an event in room A are actually events in room B
-
- bad_events = [
- (event.event_id, event.room_id)
- for event in remote_state
- if event.room_id != room_id
- ]
-
- for bad_event_id, bad_room_id in bad_events:
- # This is a bogus situation, but since we may only discover it a long time
- # after it happened, we try our best to carry on, by just omitting the
- # bad events from the returned auth/state set.
- logger.warning(
- "Remote server %s claims event %s in room %s is an auth/state "
- "event in room %s",
- destination,
- bad_event_id,
- bad_room_id,
- room_id,
- )
-
- if bad_events:
- remote_state = [e for e in remote_state if e.room_id == room_id]
-
- return remote_state
-
- async def _get_state_after_missing_prev_event(
- self,
- destination: str,
- room_id: str,
- event_id: str,
- ) -> List[EventBase]:
- """Requests all of the room state at a given event from a remote homeserver.
-
- Args:
- destination: The remote homeserver to query for the state.
- room_id: The id of the room we're interested in.
- event_id: The id of the event we want the state at.
-
- Returns:
- A list of events in the state, including the event itself
- """
- # TODO: This function is basically the same as _get_state_for_room. Can
- # we make backfill() use it, rather than having two code paths? I think the
- # only difference is that backfill() persists the prev events separately.
-
- (
- state_event_ids,
- auth_event_ids,
- ) = await self.federation_client.get_room_state_ids(
- destination, room_id, event_id=event_id
- )
-
- logger.debug(
- "state_ids returned %i state events, %i auth events",
- len(state_event_ids),
- len(auth_event_ids),
- )
-
- # start by just trying to fetch the events from the store
- desired_events = set(state_event_ids)
- desired_events.add(event_id)
- logger.debug("Fetching %i events from cache/store", len(desired_events))
- fetched_events = await self.store.get_events(
- desired_events, allow_rejected=True
- )
-
- missing_desired_events = desired_events - fetched_events.keys()
- logger.debug(
- "We are missing %i events (got %i)",
- len(missing_desired_events),
- len(fetched_events),
- )
-
- # We probably won't need most of the auth events, so let's just check which
- # we have for now, rather than thrashing the event cache with them all
- # unnecessarily.
-
- # TODO: we probably won't actually need all of the auth events, since we
- # already have a bunch of the state events. It would be nice if the
- # federation api gave us a way of finding out which we actually need.
-
- missing_auth_events = set(auth_event_ids) - fetched_events.keys()
- missing_auth_events.difference_update(
- await self.store.have_seen_events(room_id, missing_auth_events)
- )
- logger.debug("We are also missing %i auth events", len(missing_auth_events))
-
- missing_events = missing_desired_events | missing_auth_events
- logger.debug("Fetching %i events from remote", len(missing_events))
- await self._get_events_and_persist(
- destination=destination, room_id=room_id, events=missing_events
- )
-
- # we need to make sure we re-load from the database to get the rejected
- # state correct.
- fetched_events.update(
- await self.store.get_events(missing_desired_events, allow_rejected=True)
- )
-
- # check for events which were in the wrong room.
- #
- # this can happen if a remote server claims that the state or
- # auth_events at an event in room A are actually events in room B
-
- bad_events = [
- (event_id, event.room_id)
- for event_id, event in fetched_events.items()
- if event.room_id != room_id
- ]
-
- for bad_event_id, bad_room_id in bad_events:
- # This is a bogus situation, but since we may only discover it a long time
- # after it happened, we try our best to carry on, by just omitting the
- # bad events from the returned state set.
- logger.warning(
- "Remote server %s claims event %s in room %s is an auth/state "
- "event in room %s",
- destination,
- bad_event_id,
- bad_room_id,
- room_id,
- )
-
- del fetched_events[bad_event_id]
-
- # if we couldn't get the prev event in question, that's a problem.
- remote_event = fetched_events.get(event_id)
- if not remote_event:
- raise Exception("Unable to get missing prev_event %s" % (event_id,))
-
- # missing state at that event is a warning, not a blocker
- # XXX: this doesn't sound right? it means that we'll end up with incomplete
- # state.
- failed_to_fetch = desired_events - fetched_events.keys()
- if failed_to_fetch:
- logger.warning(
- "Failed to fetch missing state events for %s %s",
- event_id,
- failed_to_fetch,
- )
-
- remote_state = [
- fetched_events[e_id] for e_id in state_event_ids if e_id in fetched_events
- ]
-
- if remote_event.is_state() and remote_event.rejected_reason is None:
- remote_state.append(remote_event)
-
- return remote_state
-
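Both removed state-fetching helpers end with the same cross-room sanity check, since a malicious or confused server could claim that room-B events belong to room A's state or auth chain. The filter, as a worked example mapping event IDs to their claimed rooms:

```python
room_id = "!a:example.org"
fetched = {
    "$s1": "!a:example.org",
    "$s2": "!b:example.org",  # smuggled in from another room
    "$s3": "!a:example.org",
}

bad = [(eid, rid) for eid, rid in fetched.items() if rid != room_id]
kept = {eid: rid for eid, rid in fetched.items() if rid == room_id}

assert bad == [("$s2", "!b:example.org")]  # logged, then dropped
assert set(kept) == {"$s1", "$s3"}
```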
- async def _process_received_pdu(
- self,
- origin: str,
- event: EventBase,
- state: Optional[Iterable[EventBase]],
- ) -> None:
- """Called when we have a new pdu. We need to do auth checks and put it
- through the StateHandler.
-
- Args:
- origin: server sending the event
-
- event: event to be persisted
-
- state: Normally None, but if we are handling a gap in the graph
- (ie, we are missing one or more prev_events), the resolved state at the
- event
- """
- logger.debug("Processing event: %s", event)
-
- try:
- context = await self.state_handler.compute_event_context(
- event, old_state=state
- )
- await self._auth_and_persist_event(origin, event, context, state=state)
- except AuthError as e:
- raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
-
- # For encrypted messages we check that we know about the sending device,
- # if we don't then we mark the device cache for that user as stale.
- if event.type == EventTypes.Encrypted:
- device_id = event.content.get("device_id")
- sender_key = event.content.get("sender_key")
-
- cached_devices = await self.store.get_cached_devices_for_user(event.sender)
-
- resync = False # Whether we should resync device lists.
-
- device = None
- if device_id is not None:
- device = cached_devices.get(device_id)
- if device is None:
- logger.info(
- "Received event from remote device not in our cache: %s %s",
- event.sender,
- device_id,
- )
- resync = True
-
- # We also check if the `sender_key` matches what we expect.
- if sender_key is not None:
- # Figure out what sender key we're expecting. If we know the
- # device and recognize the algorithm then we can work out the
- # exact key to expect. Otherwise check it matches any key we
- # have for that device.
-
- current_keys: Container[str] = []
-
- if device:
- keys = device.get("keys", {}).get("keys", {})
-
- if (
- event.content.get("algorithm")
- == RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2
- ):
- # For this algorithm we expect a curve25519 key.
- key_name = "curve25519:%s" % (device_id,)
- current_keys = [keys.get(key_name)]
- else:
- # We don't understand the algorithm, so we just
- # check it matches a key for the device.
- current_keys = keys.values()
- elif device_id:
- # We don't have any keys for the device ID.
- pass
- else:
- # The event didn't include a device ID, so we just look for
- # keys across all devices.
- current_keys = [
- key
- for device in cached_devices.values()
- for key in device.get("keys", {}).get("keys", {}).values()
- ]
-
- # We now check that the sender key matches (one of) the expected
- # keys.
- if sender_key not in current_keys:
- logger.info(
- "Received event from remote device with unexpected sender key: %s %s: %s",
- event.sender,
- device_id or "<no device_id>",
- sender_key,
- )
- resync = True
-
- if resync:
- run_as_background_process(
- "resync_device_due_to_pdu", self._resync_device, event.sender
- )
-
- async def _resync_device(self, sender: str) -> None:
- """We have detected that the device list for the given user may be out
- of sync, so we try and resync them.
- """
-
- try:
- await self.store.mark_remote_user_device_cache_as_stale(sender)
-
- # Immediately attempt a resync in the background
- if self.config.worker_app:
- await self._user_device_resync(user_id=sender)
- else:
- await self._device_list_updater.user_device_resync(sender)
- except Exception:
- logger.exception("Failed to resync device for %s", sender)
-
- @log_function
- async def backfill(
- self, dest: str, room_id: str, limit: int, extremities: List[str]
- ) -> List[EventBase]:
- """Trigger a backfill request to `dest` for the given `room_id`
-
- This will attempt to get more events from the remote. If the other side
- has no new events to offer, this will return an empty list.
-
- As the events are received, we check their signatures, and also do some
- sanity-checking on them. If any of the backfilled events are invalid,
- this method throws a SynapseError.
-
- TODO: make this more useful to distinguish failures of the remote
- server from invalid events (there is probably no point in trying to
- re-fetch invalid events from every other HS in the room.)
- """
- if dest == self.server_name:
- raise SynapseError(400, "Can't backfill from self.")
-
- events = await self.federation_client.backfill(
- dest, room_id, limit=limit, extremities=extremities
- )
-
- if not events:
- return []
-
- # ideally we'd sanity check the events here for excess prev_events etc,
- # but it's hard to reject events at this point without completely
- # breaking backfill in the same way that it is currently broken by
- # events whose signature we cannot verify (#3121).
- #
- # So for now we accept the events anyway. #3124 tracks this.
- #
- # for ev in events:
- # self._sanity_check_event(ev)
-
- # Don't bother processing events we already have.
- seen_events = await self.store.have_events_in_timeline(
- {e.event_id for e in events}
- )
-
- events = [e for e in events if e.event_id not in seen_events]
-
- if not events:
- return []
-
- event_map = {e.event_id: e for e in events}
-
- event_ids = {e.event_id for e in events}
-
- # build a list of events whose prev_events weren't in the batch.
- # (XXX: this will include events whose prev_events we already have; that doesn't
- # sound right?)
- edges = [ev.event_id for ev in events if set(ev.prev_event_ids()) - event_ids]
-
- logger.info("backfill: Got %d events with %d edges", len(events), len(edges))
-
- # For each edge get the current state.
-
- state_events = {}
- events_to_state = {}
- for e_id in edges:
- state = await self._get_state_for_room(
- destination=dest,
- room_id=room_id,
- event_id=e_id,
- )
- state_events.update({s.event_id: s for s in state})
- events_to_state[e_id] = state
-
- required_auth = {
- a_id
- for event in events + list(state_events.values())
- for a_id in event.auth_event_ids()
- }
- auth_events = await self.store.get_events(required_auth, allow_rejected=True)
- auth_events.update(
- {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map}
- )
-
- ev_infos = []
-
- # Step 1: persist the events in the chunk we fetched state for (i.e.
- # the backwards extremities), with custom auth events and state
- for e_id in events_to_state:
- # For paranoia we ensure that these events are marked as
- # non-outliers
- ev = event_map[e_id]
- assert not ev.internal_metadata.is_outlier()
-
- ev_infos.append(
- _NewEventInfo(
- event=ev,
- state=events_to_state[e_id],
- auth_events={
- (
- auth_events[a_id].type,
- auth_events[a_id].state_key,
- ): auth_events[a_id]
- for a_id in ev.auth_event_ids()
- if a_id in auth_events
- },
- )
- )
-
- if ev_infos:
- await self._auth_and_persist_events(
- dest, room_id, ev_infos, backfilled=True
- )
-
- # Step 2: Persist the rest of the events in the chunk one by one
- events.sort(key=lambda e: e.depth)
-
- for event in events:
- if event in events_to_state:
- continue
-
- # For paranoia we ensure that these events are marked as
- # non-outliers
- assert not event.internal_metadata.is_outlier()
-
- context = await self.state_handler.compute_event_context(event)
-
- # We store these one at a time since each event depends on the
- # previous to work out the state.
- # TODO: We can probably do something more clever here.
- await self._auth_and_persist_event(dest, event, context, backfilled=True)
-
- return events
-
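The removed `backfill` persists the fetched chunk in two phases: backwards extremities first, with remote state attached, then the remainder one at a time in depth order so each event's state can be computed from its predecessors. A sketch of the partitioning with made-up `(event_id, depth)` tuples:

```python
events = [("$e3", 3), ("$e1", 1), ("$e2", 2)]  # (event_id, depth)
edges = {"$e1"}  # events whose prev_events were missing from the batch

# Phase 1: backwards extremities, persisted with custom auth events/state.
phase1 = [e for e in events if e[0] in edges]
# Phase 2: everything else, in depth order, since each event depends on the
# previously persisted one to work out its state.
phase2 = sorted((e for e in events if e[0] not in edges), key=lambda e: e[1])

assert phase1 == [("$e1", 1)]
assert phase2 == [("$e2", 2), ("$e3", 3)]
```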
async def maybe_backfill(
self, room_id: str, current_depth: int, limit: int
) -> bool:
@@ -1058,9 +136,19 @@ class FederationHandler(BaseHandler):
async def _maybe_backfill_inner(
self, room_id: str, current_depth: int, limit: int
) -> bool:
- extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)
+ oldest_events_with_depth = (
+ await self.store.get_oldest_event_ids_with_depth_in_room(room_id)
+ )
+ insertion_events_to_be_backfilled = (
+ await self.store.get_insertion_event_backwards_extremities_in_room(room_id)
+ )
+ logger.debug(
+ "_maybe_backfill_inner: extremities oldest_events_with_depth=%s insertion_events_to_be_backfilled=%s",
+ oldest_events_with_depth,
+ insertion_events_to_be_backfilled,
+ )
- if not extremities:
+ if not oldest_events_with_depth and not insertion_events_to_be_backfilled:
logger.debug("Not backfilling as no extremeties found.")
return False
@@ -1090,10 +178,12 @@ class FederationHandler(BaseHandler):
# state *before* the event, ignoring the special casing certain event
# types have.
- forward_events = await self.store.get_successor_events(list(extremities))
+ forward_event_ids = await self.store.get_successor_events(
+ list(oldest_events_with_depth)
+ )
extremities_events = await self.store.get_events(
- forward_events,
+ forward_event_ids,
redact_behaviour=EventRedactBehaviour.AS_IS,
get_prev_content=False,
)
@@ -1107,10 +197,19 @@ class FederationHandler(BaseHandler):
redact=False,
check_history_visibility_only=True,
)
+ logger.debug(
+ "_maybe_backfill_inner: filtered_extremities %s", filtered_extremities
+ )
- if not filtered_extremities:
+ if not filtered_extremities and not insertion_events_to_be_backfilled:
return False
+ extremities = {
+ **oldest_events_with_depth,
+ # TODO: insertion_events_to_be_backfilled is currently skipping the filtered_extremities checks
+ **insertion_events_to_be_backfilled,
+ }
+
# Check if we reached a point where we should start backfilling.
sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
max_depth = sorted_extremeties_tuple[0][1]
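The extremity map now merges plain backwards extremities with MSC2716 insertion-event extremities, then sorts deepest-first to find the point closest to the gap. A worked example of the ordering above, with made-up depths:

```python
extremities = {"$oldest": 1, "$old": 3, "$insertion": 2}  # event_id -> depth

sorted_extremities = sorted(extremities.items(), key=lambda e: -int(e[1]))
max_depth = sorted_extremities[0][1]

assert sorted_extremities == [("$old", 3), ("$insertion", 2), ("$oldest", 1)]
assert max_depth == 3  # compared against current_depth to decide backfilling
```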
@@ -1210,14 +309,14 @@ class FederationHandler(BaseHandler):
# TODO: Should we try multiple of these at a time?
for dom in domains:
try:
- await self.backfill(
+ await self._federation_event_handler.backfill(
dom, room_id, limit=100, extremities=extremities
)
# If this succeeded then we probably already have the
# appropriate stuff.
# TODO: We can probably do something more intelligent here.
return True
- except SynapseError as e:
+ except (SynapseError, InvalidResponseError) as e:
logger.info("Failed to backfill from %s because %s", dom, e)
continue
except HttpResponseException as e:
@@ -1301,115 +400,6 @@ class FederationHandler(BaseHandler):
return False
- async def _get_events_and_persist(
- self, destination: str, room_id: str, events: Iterable[str]
- ) -> None:
- """Fetch the given events from a server, and persist them as outliers.
-
- This function *does not* recursively get missing auth events of the
- newly fetched events. Callers must include in the `events` argument
- any missing events from the auth chain.
-
- Logs a warning if we can't find the given event.
- """
-
- room_version = await self.store.get_room_version(room_id)
-
- event_map: Dict[str, EventBase] = {}
-
- async def get_event(event_id: str):
- with nested_logging_context(event_id):
- try:
- event = await self.federation_client.get_pdu(
- [destination],
- event_id,
- room_version,
- outlier=True,
- )
- if event is None:
- logger.warning(
- "Server %s didn't return event %s",
- destination,
- event_id,
- )
- return
-
- event_map[event.event_id] = event
-
- except Exception as e:
- logger.warning(
- "Error fetching missing state/auth event %s: %s %s",
- event_id,
- type(e),
- e,
- )
-
- await concurrently_execute(get_event, events, 5)
-
- # Make a map of auth events for each event. We do this after fetching
- # all the events as some of the events' auth events will be in the list
- # of requested events.
-
- auth_events = [
- aid
- for event in event_map.values()
- for aid in event.auth_event_ids()
- if aid not in event_map
- ]
- persisted_events = await self.store.get_events(
- auth_events,
- allow_rejected=True,
- )
-
- event_infos = []
- for event in event_map.values():
- auth = {}
- for auth_event_id in event.auth_event_ids():
- ae = persisted_events.get(auth_event_id) or event_map.get(auth_event_id)
- if ae:
- auth[(ae.type, ae.state_key)] = ae
- else:
- logger.info("Missing auth event %s", auth_event_id)
-
- event_infos.append(_NewEventInfo(event, None, auth))
-
- if event_infos:
- await self._auth_and_persist_events(
- destination,
- room_id,
- event_infos,
- )
-
- def _sanity_check_event(self, ev: EventBase) -> None:
- """
- Do some early sanity checks of a received event
-
- In particular, checks it doesn't have an excessive number of
- prev_events or auth_events, which could cause a huge state resolution
- or cascade of event fetches.
-
- Args:
- ev: event to be checked
-
- Raises:
- SynapseError if the event does not pass muster
- """
- if len(ev.prev_event_ids()) > 20:
- logger.warning(
- "Rejecting event %s which has %i prev_events",
- ev.event_id,
- len(ev.prev_event_ids()),
- )
- raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many prev_events")
-
- if len(ev.auth_event_ids()) > 10:
- logger.warning(
- "Rejecting event %s which has %i auth_events",
- ev.event_id,
- len(ev.auth_event_ids()),
- )
- raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events")
-
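The removed sanity checker enforces hard caps (20 prev_events and 10 auth_events, per the code above) before any expensive processing. As a standalone sketch:

```python
MAX_PREV_EVENTS = 20
MAX_AUTH_EVENTS = 10


def sanity_check_event(n_prev: int, n_auth: int) -> None:
    # An event with hundreds of prev_events or auth_events could trigger a
    # huge state resolution or a cascade of event fetches, so reject early.
    if n_prev > MAX_PREV_EVENTS:
        raise ValueError(f"Too many prev_events ({n_prev})")
    if n_auth > MAX_AUTH_EVENTS:
        raise ValueError(f"Too many auth_events ({n_auth})")


sanity_check_event(2, 4)  # a typical event passes
```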
async def send_invite(self, target_host: str, event: EventBase) -> EventBase:
"""Sends the invite to the remote server for signing.
@@ -1476,9 +466,9 @@ class FederationHandler(BaseHandler):
# This shouldn't happen, because the RoomMemberHandler has a
# linearizer lock which only allows one operation per user per room
# at a time - so this is just paranoia.
- assert room_id not in self.room_queues
+ assert room_id not in self._federation_event_handler.room_queues
- self.room_queues[room_id] = []
+ self._federation_event_handler.room_queues[room_id] = []
await self._clean_room_for_join(room_id)
@@ -1520,6 +510,7 @@ class FederationHandler(BaseHandler):
await self.store.upsert_room_on_join(
room_id=room_id,
room_version=room_version_obj,
+ auth_events=auth_chain,
)
with start_active_span("_persist_auth_tree"):
@@ -1554,8 +545,8 @@ class FederationHandler(BaseHandler):
logger.debug("Finished joining %s to %s", joinee, room_id)
return event.event_id, max_stream_id
finally:
- room_queue = self.room_queues[room_id]
- del self.room_queues[room_id]
+ room_queue = self._federation_event_handler.room_queues[room_id]
+ del self._federation_event_handler.room_queues[room_id]
# we don't need to wait for the queued events to be processed -
# it's just a best-effort thing at this point. We do want to do
@@ -1631,7 +622,7 @@ class FederationHandler(BaseHandler):
event.unsigned["knock_room_state"] = stripped_room_state["knock_state_events"]
context = await self.state_handler.compute_event_context(event)
- stream_id = await self.persist_events_and_notify(
+ stream_id = await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
)
return event.event_id, stream_id
@@ -1647,13 +638,11 @@ class FederationHandler(BaseHandler):
for p, origin in room_queue:
try:
logger.info(
- "Processing queued PDU %s which was received "
- "while we were joining %s",
- p.event_id,
- p.room_id,
+ "Processing queued PDU %s which was received while we were joining",
+ p,
)
with nested_logging_context(p.event_id):
- await self.on_receive_pdu(origin, p, sent_to_us_directly=True)
+ await self._federation_event_handler.on_receive_pdu(origin, p)
except Exception as e:
logger.warning(
"Error handling queued PDU %s from %s: %s", p.event_id, origin, e
@@ -1746,7 +735,7 @@ class FederationHandler(BaseHandler):
raise
# Ensure the user can even join the room.
- await self._check_join_restrictions(context, event)
+ await self._federation_event_handler.check_join_restrictions(context, event)
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
@@ -1823,7 +812,9 @@ class FederationHandler(BaseHandler):
)
context = await self.state_handler.compute_event_context(event)
- await self.persist_events_and_notify(event.room_id, [(event, context)])
+ await self._federation_event_handler.persist_events_and_notify(
+ event.room_id, [(event, context)]
+ )
return event
@@ -1850,7 +841,7 @@ class FederationHandler(BaseHandler):
await self.federation_client.send_leave(host_list, event)
context = await self.state_handler.compute_event_context(event)
- stream_id = await self.persist_events_and_notify(
+ stream_id = await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
)
@@ -1993,116 +984,6 @@ class FederationHandler(BaseHandler):
return event
- @log_function
- async def on_send_membership_event(
- self, origin: str, event: EventBase
- ) -> Tuple[EventBase, EventContext]:
- """
- We have received a join/leave/knock event for a room via send_join/leave/knock.
-
- Verify that event and send it into the room on the remote homeserver's behalf.
-
- This is quite similar to on_receive_pdu, with the following principal
- differences:
- * only membership events are permitted (and only events with
- sender==state_key -- ie, no kicks or bans)
- * *We* send out the event on behalf of the remote server.
- * We enforce the membership restrictions of restricted rooms.
- * Rejected events result in an exception rather than being stored.
-
- There are also other differences; however, it is not clear whether these are by
- design or omission. In particular, we do not attempt to backfill any missing
- prev_events.
-
- Args:
- origin: The homeserver of the remote (joining/invited/knocking) user.
- event: The member event that has been signed by the remote homeserver.
-
- Returns:
- The event and context of the event after inserting it into the room graph.
-
- Raises:
- SynapseError if the event is not accepted into the room
- """
- logger.debug(
- "on_send_membership_event: Got event: %s, signatures: %s",
- event.event_id,
- event.signatures,
- )
-
- if get_domain_from_id(event.sender) != origin:
- logger.info(
- "Got send_membership request for user %r from different origin %s",
- event.sender,
- origin,
- )
- raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
-
- if event.sender != event.state_key:
- raise SynapseError(400, "state_key and sender must match", Codes.BAD_JSON)
-
- assert not event.internal_metadata.outlier
-
- # Send this event on behalf of the other server.
- #
- # The remote server isn't a full participant in the room at this point, so
- # may not have an up-to-date list of the other homeservers participating in
- # the room, so we send it on their behalf.
- event.internal_metadata.send_on_behalf_of = origin
-
- context = await self.state_handler.compute_event_context(event)
- context = await self._check_event_auth(origin, event, context)
- if context.rejected:
- raise SynapseError(
- 403, f"{event.membership} event was rejected", Codes.FORBIDDEN
- )
-
- # for joins, we need to check the restrictions of restricted rooms
- if event.membership == Membership.JOIN:
- await self._check_join_restrictions(context, event)
-
- # for knock events, we run the third-party event rules. It's not entirely clear
- # why we don't do this for other sorts of membership events.
- if event.membership == Membership.KNOCK:
- event_allowed, _ = await self.third_party_event_rules.check_event_allowed(
- event, context
- )
- if not event_allowed:
- logger.info("Sending of knock %s forbidden by third-party rules", event)
- raise SynapseError(
- 403, "This event is not allowed in this context", Codes.FORBIDDEN
- )
-
- # all looks good, we can persist the event.
- await self._run_push_actions_and_persist_event(event, context)
- return event, context
-
- async def _check_join_restrictions(
- self, context: EventContext, event: EventBase
- ) -> None:
- """Check that restrictions in restricted join rules are matched
-
- Called when we receive a join event via send_join.
-
- Raises an auth error if the restrictions are not matched.
- """
- prev_state_ids = await context.get_prev_state_ids()
-
- # Check if the user is already in the room or invited to the room.
- user_id = event.state_key
- prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
- prev_member_event = None
- if prev_member_event_id:
- prev_member_event = await self.store.get_event(prev_member_event_id)
-
- # Check if the member should be allowed access via membership in a space.
- await self._event_auth_handler.check_restricted_join_rules(
- prev_state_ids,
- event.room_version,
- user_id,
- prev_member_event,
- )
-
async def get_state_for_pdu(self, room_id: str, event_id: str) -> List[EventBase]:
"""Returns the state at the event. i.e. not including said event."""
@@ -2203,130 +1084,6 @@ class FederationHandler(BaseHandler):
else:
return None
- async def get_min_depth_for_context(self, context: str) -> int:
- return await self.store.get_min_depth(context)
-
- async def _auth_and_persist_event(
- self,
- origin: str,
- event: EventBase,
- context: EventContext,
- state: Optional[Iterable[EventBase]] = None,
- auth_events: Optional[MutableStateMap[EventBase]] = None,
- backfilled: bool = False,
- ) -> None:
- """
- Process an event by performing auth checks and then persisting to the database.
-
- Args:
- origin: The host the event originates from.
- event: The event itself.
- context:
- The event context.
-
- NB that this function potentially modifies it.
- state:
- The state events used to check the event for soft-fail. If this is
- not provided the current state events will be used.
- auth_events:
- Map from (event_type, state_key) to event
-
- Normally, our calculated auth_events based on the state of the room
- at the event's position in the DAG, though occasionally (eg if the
- event is an outlier), may be the auth events claimed by the remote
- server.
- backfilled: True if the event was backfilled.
- """
- context = await self._check_event_auth(
- origin,
- event,
- context,
- state=state,
- auth_events=auth_events,
- backfilled=backfilled,
- )
-
- await self._run_push_actions_and_persist_event(event, context, backfilled)
-
- async def _run_push_actions_and_persist_event(
- self, event: EventBase, context: EventContext, backfilled: bool = False
- ):
- """Run the push actions for a received event, and persist it.
-
- Args:
- event: The event itself.
- context: The event context.
- backfilled: True if the event was backfilled.
- """
- try:
- if (
- not event.internal_metadata.is_outlier()
- and not backfilled
- and not context.rejected
- ):
- await self.action_generator.handle_push_actions_for_event(
- event, context
- )
-
- await self.persist_events_and_notify(
- event.room_id, [(event, context)], backfilled=backfilled
- )
- except Exception:
- run_in_background(
- self.store.remove_push_actions_from_staging, event.event_id
- )
- raise
-
- async def _auth_and_persist_events(
- self,
- origin: str,
- room_id: str,
- event_infos: Collection[_NewEventInfo],
- backfilled: bool = False,
- ) -> None:
- """Creates the appropriate contexts and persists events. The events
- should not depend on one another, e.g. this should be used to persist
- a bunch of outliers, but not a chunk of individual events that depend
- on each other for state calculations.
-
- Notifies about the events where appropriate.
- """
-
- if not event_infos:
- return
-
- async def prep(ev_info: _NewEventInfo):
- event = ev_info.event
- with nested_logging_context(suffix=event.event_id):
- res = await self.state_handler.compute_event_context(
- event, old_state=ev_info.state
- )
- res = await self._check_event_auth(
- origin,
- event,
- res,
- state=ev_info.state,
- auth_events=ev_info.auth_events,
- backfilled=backfilled,
- )
- return res
-
- contexts = await make_deferred_yieldable(
- defer.gatherResults(
- [run_in_background(prep, ev_info) for ev_info in event_infos],
- consumeErrors=True,
- )
- )
-
- await self.persist_events_and_notify(
- room_id,
- [
- (ev_info.event, context)
- for ev_info, context in zip(event_infos, contexts)
- ],
- backfilled=backfilled,
- )
-
async def _persist_auth_tree(
self,
origin: str,
@@ -2427,7 +1184,7 @@ class FederationHandler(BaseHandler):
if auth_events or state:
with start_active_span("persist_events_and_notify.state"):
- await self.persist_events_and_notify(
+ await self._federation_event_handler.persist_events_and_notify(
room_id,
[
(e, events_to_context[e.event_id])
@@ -2439,108 +1196,10 @@ class FederationHandler(BaseHandler):
event, old_state=state
)
- return await self.persist_events_and_notify(
+ return await self._federation_event_handler.persist_events_and_notify(
room_id, [(event, new_event_context)]
)
- async def _check_for_soft_fail(
- self,
- event: EventBase,
- state: Optional[Iterable[EventBase]],
- backfilled: bool,
- origin: str,
- ) -> None:
- """Checks if we should soft fail the event; if so, marks the event as
- such.
-
- Args:
- event
- state: The state at the event if we don't have all the event's prev events
- backfilled: Whether the event is from backfill
- origin: The host the event originates from.
- """
- # For new (non-backfilled and non-outlier) events we check if the event
- # passes auth based on the current state. If it doesn't then we
- # "soft-fail" the event.
- if backfilled or event.internal_metadata.is_outlier():
- return
-
- extrem_ids_list = await self.store.get_latest_event_ids_in_room(event.room_id)
- extrem_ids = set(extrem_ids_list)
- prev_event_ids = set(event.prev_event_ids())
-
- if extrem_ids == prev_event_ids:
- # If they're the same then the current state is the same as the
- # state at the event, so no point rechecking auth for soft fail.
- return
-
- room_version = await self.store.get_room_version_id(event.room_id)
- room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
-
- # Calculate the "current state".
- if state is not None:
- # If we're explicitly given the state then we won't have all the
- # prev events, and so we have a gap in the graph. In this case
- # we want to be a little careful as we might have been down for
- # a while and have an incorrect view of the current state,
- # however we still want to do checks as gaps are easy to
- # maliciously manufacture.
- #
- # So we use a "current state" that is actually a state
- # resolution across the current forward extremities and the
- # given state at the event. This should correctly handle cases
- # like bans, especially with state res v2.
-
- state_sets_d = await self.state_store.get_state_groups(
- event.room_id, extrem_ids
- )
- state_sets: List[Iterable[EventBase]] = list(state_sets_d.values())
- state_sets.append(state)
- current_states = await self.state_handler.resolve_events(
- room_version, state_sets, event
- )
- current_state_ids: StateMap[str] = {
- k: e.event_id for k, e in current_states.items()
- }
- else:
- current_state_ids = await self.state_handler.get_current_state_ids(
- event.room_id, latest_event_ids=extrem_ids
- )
-
- logger.debug(
- "Doing soft-fail check for %s: state %s",
- event.event_id,
- current_state_ids,
- )
-
- # Now check if event pass auth against said current state
- auth_types = auth_types_for_event(room_version_obj, event)
- current_state_ids_list = [
- e for k, e in current_state_ids.items() if k in auth_types
- ]
-
- auth_events_map = await self.store.get_events(current_state_ids_list)
- current_auth_events = {
- (e.type, e.state_key): e for e in auth_events_map.values()
- }
-
- try:
- event_auth.check(room_version_obj, event, auth_events=current_auth_events)
- except AuthError as e:
- logger.warning(
- "Soft-failing %r (from %s) because %s",
- event,
- e,
- origin,
- extra={
- "room_id": event.room_id,
- "mxid": event.sender,
- "hs": origin,
- },
- )
- soft_failed_event_counter.inc()
- event.internal_metadata.soft_failed = True
-
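The removed soft-fail logic short-circuits in three cases; the interesting one is when the event's prev_events are exactly our forward extremities, since then the "current state" and the state at the event coincide and re-running auth cannot change the verdict. As a small predicate:

```python
def needs_soft_fail_check(backfilled: bool, is_outlier: bool,
                          extrem_ids: set, prev_event_ids: set) -> bool:
    # Backfilled events and outliers are never soft-failed; otherwise we
    # only re-check auth when current state can differ from event state.
    if backfilled or is_outlier:
        return False
    return extrem_ids != prev_event_ids


assert not needs_soft_fail_check(False, False, {"$a"}, {"$a"})
assert needs_soft_fail_check(False, False, {"$a", "$b"}, {"$a"})
```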
async def on_get_missing_events(
self,
origin: str,
@@ -2569,333 +1228,6 @@ class FederationHandler(BaseHandler):
return missing_events
- async def _check_event_auth(
- self,
- origin: str,
- event: EventBase,
- context: EventContext,
- state: Optional[Iterable[EventBase]] = None,
- auth_events: Optional[MutableStateMap[EventBase]] = None,
- backfilled: bool = False,
- ) -> EventContext:
- """
- Checks whether an event should be rejected (for failing auth checks).
-
- Args:
- origin: The host the event originates from.
- event: The event itself.
- context:
- The event context.
-
- NB that this function potentially modifies it.
- state:
- The state events used to check the event for soft-fail. If this is
- not provided the current state events will be used.
- auth_events:
- Map from (event_type, state_key) to event
-
- Normally, our calculated auth_events based on the state of the room
- at the event's position in the DAG, though occasionally (eg if the
- event is an outlier), may be the auth events claimed by the remote
- server.
-
- Also NB that this function adds entries to it.
-
- If this is not provided, it is calculated from the previous state IDs.
- backfilled: True if the event was backfilled.
-
- Returns:
- The updated context object.
- """
- room_version = await self.store.get_room_version_id(event.room_id)
- room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
-
- if not auth_events:
- prev_state_ids = await context.get_prev_state_ids()
- auth_events_ids = self._event_auth_handler.compute_auth_events(
- event, prev_state_ids, for_verification=True
- )
- auth_events_x = await self.store.get_events(auth_events_ids)
- auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()}
-
- # This is a hack to fix some old rooms where the initial join event
- # didn't reference the create event in its auth events.
- if event.type == EventTypes.Member and not event.auth_event_ids():
- if len(event.prev_event_ids()) == 1 and event.depth < 5:
- c = await self.store.get_event(
- event.prev_event_ids()[0], allow_none=True
- )
- if c and c.type == EventTypes.Create:
- auth_events[(c.type, c.state_key)] = c
-
- try:
- context = await self._update_auth_events_and_context_for_auth(
- origin, event, context, auth_events
- )
- except Exception:
- # We don't really mind if the above fails, so lets not fail
- # processing if it does. However, it really shouldn't fail so
- # let's still log as an exception since we'll still want to fix
- # any bugs.
- logger.exception(
- "Failed to double check auth events for %s with remote. "
- "Ignoring failure and continuing processing of event.",
- event.event_id,
- )
-
- try:
- event_auth.check(room_version_obj, event, auth_events=auth_events)
- except AuthError as e:
- logger.warning("Failed auth resolution for %r because %s", event, e)
- context.rejected = RejectedReason.AUTH_ERROR
-
- if not context.rejected:
- await self._check_for_soft_fail(event, state, backfilled, origin=origin)
-
- if event.type == EventTypes.GuestAccess and not context.rejected:
- await self.maybe_kick_guest_users(event)
-
- # If we are going to send this event over federation we precalculate
- # the joined hosts.
- if event.internal_metadata.get_send_on_behalf_of():
- await self.event_creation_handler.cache_joined_hosts_for_event(
- event, context
- )
-
- return context
-
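The removed `_check_event_auth` carries a narrow workaround for ancient rooms whose earliest join events omitted the create event from their auth_events. The detection condition, as a standalone predicate (the event type string is inlined for illustration):

```python
def needs_create_event_hack(event_type: str, auth_event_ids: list,
                            prev_event_ids: list, depth: int) -> bool:
    # Detect the narrow shape the hack targets: a membership event with no
    # auth events, a single prev_event, sitting very near the top of the DAG.
    return (
        event_type == "m.room.member"
        and not auth_event_ids
        and len(prev_event_ids) == 1
        and depth < 5
    )


assert needs_create_event_hack("m.room.member", [], ["$create"], 2)
assert not needs_create_event_hack("m.room.member", ["$c"], ["$p"], 2)
```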
- async def _update_auth_events_and_context_for_auth(
- self,
- origin: str,
- event: EventBase,
- context: EventContext,
- auth_events: MutableStateMap[EventBase],
- ) -> EventContext:
- """Helper for _check_event_auth. See there for docs.
-
- Checks whether a given event has the expected auth events. If it
- doesn't then we talk to the remote server to compare state to see if
- we can come to a consensus (e.g. if one server missed some valid
- state).
-
- This attempts to resolve any potential divergence of state between
- servers, but is not essential and so failures should not block further
- processing of the event.
-
- Args:
- origin:
- event:
- context:
-
- auth_events:
- Map from (event_type, state_key) to event
-
- Normally, our calculated auth_events based on the state of the room
- at the event's position in the DAG, though occasionally (eg if the
- event is an outlier), may be the auth events claimed by the remote
- server.
-
- Also NB that this function adds entries to it.
-
- Returns:
- updated context
- """
- event_auth_events = set(event.auth_event_ids())
-
- # missing_auth is the set of the event's auth_events which we don't yet have
- # in auth_events.
- missing_auth = event_auth_events.difference(
- e.event_id for e in auth_events.values()
- )
-
- # if we have missing events, we need to fetch those events from somewhere.
- #
- # we start by checking if they are in the store, and then try calling /event_auth/.
- if missing_auth:
- have_events = await self.store.have_seen_events(event.room_id, missing_auth)
- logger.debug("Events %s are in the store", have_events)
- missing_auth.difference_update(have_events)
-
- if missing_auth:
- # If we don't have all the auth events, we need to get them.
- logger.info("auth_events contains unknown events: %s", missing_auth)
- try:
- try:
- remote_auth_chain = await self.federation_client.get_event_auth(
- origin, event.room_id, event.event_id
- )
- except RequestSendFailed as e1:
- # The other side isn't around or doesn't implement the
- # endpoint, so lets just bail out.
- logger.info("Failed to get event auth from remote: %s", e1)
- return context
-
- seen_remotes = await self.store.have_seen_events(
- event.room_id, [e.event_id for e in remote_auth_chain]
- )
-
- for e in remote_auth_chain:
- if e.event_id in seen_remotes:
- continue
-
- if e.event_id == event.event_id:
- continue
-
- try:
- auth_ids = e.auth_event_ids()
- auth = {
- (e.type, e.state_key): e
- for e in remote_auth_chain
- if e.event_id in auth_ids or e.type == EventTypes.Create
- }
- e.internal_metadata.outlier = True
-
- logger.debug(
- "_check_event_auth %s missing_auth: %s",
- event.event_id,
- e.event_id,
- )
- missing_auth_event_context = (
- await self.state_handler.compute_event_context(e)
- )
- await self._auth_and_persist_event(
- origin, e, missing_auth_event_context, auth_events=auth
- )
-
- if e.event_id in event_auth_events:
- auth_events[(e.type, e.state_key)] = e
- except AuthError:
- pass
-
- except Exception:
- logger.exception("Failed to get auth chain")
-
- if event.internal_metadata.is_outlier():
- # XXX: given that, for an outlier, we'll be working with the
- # event's *claimed* auth events rather than those we calculated:
- # (a) is there any point in this test, since different_auth below will
- # obviously be empty
- # (b) alternatively, why don't we do it earlier?
- logger.info("Skipping auth_event fetch for outlier")
- return context
-
- different_auth = event_auth_events.difference(
- e.event_id for e in auth_events.values()
- )
-
- if not different_auth:
- return context
-
- logger.info(
- "auth_events refers to events which are not in our calculated auth "
- "chain: %s",
- different_auth,
- )
-
- # XXX: currently this checks for redactions but I'm not convinced that is
- # necessary?
- different_events = await self.store.get_events_as_list(different_auth)
-
- for d in different_events:
- if d.room_id != event.room_id:
- logger.warning(
- "Event %s refers to auth_event %s which is in a different room",
- event.event_id,
- d.event_id,
- )
-
- # don't attempt to resolve the claimed auth events against our own
- # in this case: just use our own auth events.
- #
- # XXX: should we reject the event in this case? It feels like we should,
- # but then shouldn't we also do so if we've failed to fetch any of the
- # auth events?
- return context
-
- # now we state-resolve between our own idea of the auth events, and the remote's
- # idea of them.
-
- local_state = auth_events.values()
- remote_auth_events = dict(auth_events)
- remote_auth_events.update({(d.type, d.state_key): d for d in different_events})
- remote_state = remote_auth_events.values()
-
- room_version = await self.store.get_room_version_id(event.room_id)
- new_state = await self.state_handler.resolve_events(
- room_version, (local_state, remote_state), event
- )
-
- logger.info(
- "After state res: updating auth_events with new state %s",
- {
- (d.type, d.state_key): d.event_id
- for d in new_state.values()
- if auth_events.get((d.type, d.state_key)) != d
- },
- )
-
- auth_events.update(new_state)
-
- context = await self._update_context_for_auth_events(
- event, context, auth_events
- )
-
- return context
-
- async def _update_context_for_auth_events(
- self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
- ) -> EventContext:
- """Update the state_ids in an event context after auth event resolution,
- storing the changes as a new state group.
-
- Args:
- event: The event we're handling the context for
-
- context: initial event context
-
- auth_events: Events to update in the event context.
-
- Returns:
- new event context
- """
- # exclude the state key of the new event from the current_state in the context.
- if event.is_state():
- event_key: Optional[Tuple[str, str]] = (event.type, event.state_key)
- else:
- event_key = None
- state_updates = {
- k: a.event_id for k, a in auth_events.items() if k != event_key
- }
-
- current_state_ids = await context.get_current_state_ids()
- current_state_ids = dict(current_state_ids) # type: ignore
-
- current_state_ids.update(state_updates)
-
- prev_state_ids = await context.get_prev_state_ids()
- prev_state_ids = dict(prev_state_ids)
-
- prev_state_ids.update({k: a.event_id for k, a in auth_events.items()})
-
- # create a new state group as a delta from the existing one.
- prev_group = context.state_group
- state_group = await self.state_store.store_state_group(
- event.event_id,
- event.room_id,
- prev_group=prev_group,
- delta_ids=state_updates,
- current_state_ids=current_state_ids,
- )
-
- return EventContext.with_state(
- state_group=state_group,
- state_group_before_event=context.state_group_before_event,
- current_state_ids=current_state_ids,
- prev_state_ids=prev_state_ids,
- prev_group=prev_group,
- delta_ids=state_updates,
- )
-
async def construct_auth_difference(
self, local_auth: Iterable[EventBase], remote_auth: Iterable[EventBase]
) -> Dict:
@@ -3282,99 +1614,6 @@ class FederationHandler(BaseHandler):
if "valid" not in response or not response["valid"]:
raise AuthError(403, "Third party certificate was invalid")
- async def persist_events_and_notify(
- self,
- room_id: str,
- event_and_contexts: Sequence[Tuple[EventBase, EventContext]],
- backfilled: bool = False,
- ) -> int:
- """Persists events and tells the notifier/pushers about them, if
- necessary.
-
- Args:
- room_id: The room ID of events being persisted.
- event_and_contexts: Sequence of events with their associated
- context that should be persisted. All events must belong to
- the same room.
- backfilled: Whether these events are a result of
- backfilling or not
-
- Returns:
- The stream ID after which all events have been persisted.
- """
- if not event_and_contexts:
- return self.store.get_current_events_token()
-
- instance = self.config.worker.events_shard_config.get_instance(room_id)
- if instance != self._instance_name:
- # Limit the number of events sent over replication. We choose 200
- # here as that is what we default to in `max_request_body_size(..)`
- for batch in batch_iter(event_and_contexts, 200):
- result = await self._send_events(
- instance_name=instance,
- store=self.store,
- room_id=room_id,
- event_and_contexts=batch,
- backfilled=backfilled,
- )
- return result["max_stream_id"]
- else:
- assert self.storage.persistence
-
- # Note that this returns the events that were persisted, which may not be
- # the same as were passed in if some were deduplicated due to transaction IDs.
- events, max_stream_token = await self.storage.persistence.persist_events(
- event_and_contexts, backfilled=backfilled
- )
-
- if self._ephemeral_messages_enabled:
- for event in events:
- # If there's an expiry timestamp on the event, schedule its expiry.
- self._message_handler.maybe_schedule_expiry(event)
-
- if not backfilled: # Never notify for backfilled events
- for event in events:
- await self._notify_persisted_event(event, max_stream_token)
-
- return max_stream_token.stream
-
- async def _notify_persisted_event(
- self, event: EventBase, max_stream_token: RoomStreamToken
- ) -> None:
- """Checks to see if notifier/pushers should be notified about the
- event or not.
-
- Args:
- event:
- max_stream_id: The max_stream_id returned by persist_events
- """
-
- extra_users = []
- if event.type == EventTypes.Member:
- target_user_id = event.state_key
-
- # We notify for memberships if its an invite for one of our
- # users
- if event.internal_metadata.is_outlier():
- if event.membership != Membership.INVITE:
- if not self.is_mine_id(target_user_id):
- return
-
- target_user = UserID.from_string(target_user_id)
- extra_users.append(target_user)
- elif event.internal_metadata.is_outlier():
- return
-
- # the event has been persisted so it should have a stream ordering.
- assert event.internal_metadata.stream_ordering
-
- event_pos = PersistedEventPosition(
- self._instance_name, event.internal_metadata.stream_ordering
- )
- self.notifier.on_new_room_event(
- event, event_pos, max_stream_token, extra_users=extra_users
- )
-
async def _clean_room_for_join(self, room_id: str) -> None:
"""Called to clean up any data in DB for a given room, ready for the
server to join the room.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
new file mode 100644
index 0000000000..69f8287b2b
--- /dev/null
+++ b/synapse/handlers/federation_event.py
@@ -0,0 +1,1845 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from http import HTTPStatus
+from typing import (
+ TYPE_CHECKING,
+ Collection,
+ Container,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+)
+
+import attr
+from prometheus_client import Counter
+
+from twisted.internet import defer
+
+from synapse import event_auth
+from synapse.api.constants import (
+ EventContentFields,
+ EventTypes,
+ GuestAccess,
+ Membership,
+ RejectedReason,
+ RoomEncryptionAlgorithms,
+)
+from synapse.api.errors import (
+ AuthError,
+ Codes,
+ FederationError,
+ HttpResponseException,
+ RequestSendFailed,
+ SynapseError,
+)
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.event_auth import auth_types_for_event
+from synapse.events import EventBase
+from synapse.events.snapshot import EventContext
+from synapse.federation.federation_client import InvalidResponseError
+from synapse.logging.context import (
+ make_deferred_yieldable,
+ nested_logging_context,
+ run_in_background,
+)
+from synapse.logging.utils import log_function
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
+from synapse.replication.http.federation import (
+ ReplicationFederationSendEventsRestServlet,
+)
+from synapse.state import StateResolutionStore
+from synapse.storage.databases.main.events_worker import EventRedactBehaviour
+from synapse.types import (
+ MutableStateMap,
+ PersistedEventPosition,
+ RoomStreamToken,
+ StateMap,
+ UserID,
+ get_domain_from_id,
+)
+from synapse.util.async_helpers import Linearizer, concurrently_execute
+from synapse.util.iterutils import batch_iter
+from synapse.util.retryutils import NotRetryingDestination
+from synapse.util.stringutils import shortstr
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+
+logger = logging.getLogger(__name__)
+
+soft_failed_event_counter = Counter(
+ "synapse_federation_soft_failed_events_total",
+ "Events received over federation that we marked as soft_failed",
+)
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _NewEventInfo:
+ """Holds information about a received event, ready for passing to _auth_and_persist_events
+
+ Attributes:
+ event: the received event
+
+ claimed_auth_event_map: a map of (type, state_key) => event for the event's
+ claimed auth_events.
+
+ This can include events which have not yet been persisted, in the case that
+ we are backfilling a batch of events.
+
+            Note: May be incomplete if we were unable to find all of the claimed
+            auth events. Also, treat the contents with caution: the events might
+            have been rejected, might not yet have been authorized themselves, or
+            might be in the wrong room.
+
+ """
+
+ event: EventBase
+ claimed_auth_event_map: StateMap[EventBase]
+
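+# Note: _get_events_and_persist below wraps each event it fetches as
+#     _NewEventInfo(event, {(e.type, e.state_key): e, ...})
+# and hands the batch to _auth_and_persist_events.
+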
+
+class FederationEventHandler:
+ """Handles events that originated from federation.
+
+    Responsible for handling incoming events and passing them on to the rest
+    of the homeserver (including auth and state conflict resolution)
+ """
+
+ def __init__(self, hs: "HomeServer"):
+ self._store = hs.get_datastore()
+ self._storage = hs.get_storage()
+ self._state_store = self._storage.state
+
+ self._state_handler = hs.get_state_handler()
+ self._event_creation_handler = hs.get_event_creation_handler()
+ self._event_auth_handler = hs.get_event_auth_handler()
+ self._message_handler = hs.get_message_handler()
+ self._action_generator = hs.get_action_generator()
+ self._state_resolution_handler = hs.get_state_resolution_handler()
+ # avoid a circular dependency by deferring execution here
+ self._get_room_member_handler = hs.get_room_member_handler
+
+ self._federation_client = hs.get_federation_client()
+ self._third_party_event_rules = hs.get_third_party_event_rules()
+ self._notifier = hs.get_notifier()
+
+ self._is_mine_id = hs.is_mine_id
+ self._server_name = hs.hostname
+ self._instance_name = hs.get_instance_name()
+
+ self._config = hs.config
+ self._ephemeral_messages_enabled = hs.config.server.enable_ephemeral_messages
+
+ self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs)
+ if hs.config.worker_app:
+ self._user_device_resync = (
+ ReplicationUserDevicesResyncRestServlet.make_client(hs)
+ )
+ else:
+ self._device_list_updater = hs.get_device_handler().device_list_updater
+
+ # When joining a room we need to queue any events for that room up.
+ # For each room, a list of (pdu, origin) tuples.
+ # TODO: replace this with something more elegant, probably based around the
+ # federation event staging area.
+ self.room_queues: Dict[str, List[Tuple[EventBase, str]]] = {}
+
+ self._room_pdu_linearizer = Linearizer("fed_room_pdu")
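+        # (note: this linearizer is taken per-room in on_receive_pdu, so that
+        # we only fetch missing prev_events for one PDU at a time per room)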
+
+ async def on_receive_pdu(self, origin: str, pdu: EventBase) -> None:
+ """Process a PDU received via a federation /send/ transaction
+
+ Args:
+ origin: server which initiated the /send/ transaction. Will
+ be used to fetch missing events or state.
+ pdu: received PDU
+ """
+
+ room_id = pdu.room_id
+ event_id = pdu.event_id
+
+ # We reprocess pdus when we have seen them only as outliers
+ existing = await self._store.get_event(
+ event_id, allow_none=True, allow_rejected=True
+ )
+
+ # FIXME: Currently we fetch an event again when we already have it
+ # if it has been marked as an outlier.
+ if existing:
+ if not existing.internal_metadata.is_outlier():
+ logger.info(
+ "Ignoring received event %s which we have already seen", event_id
+ )
+ return
+ if pdu.internal_metadata.is_outlier():
+ logger.info(
+ "Ignoring received outlier %s which we already have as an outlier",
+ event_id,
+ )
+ return
+ logger.info("De-outliering event %s", event_id)
+
+ # do some initial sanity-checking of the event. In particular, make
+ # sure it doesn't have hundreds of prev_events or auth_events, which
+ # could cause a huge state resolution or cascade of event fetches.
+ try:
+ self._sanity_check_event(pdu)
+ except SynapseError as err:
+ logger.warning("Received event failed sanity checks")
+ raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id)
+
+ # If we are currently in the process of joining this room, then we
+ # queue up events for later processing.
+ if room_id in self.room_queues:
+ logger.info(
+ "Queuing PDU from %s for now: join in progress",
+ origin,
+ )
+ self.room_queues[room_id].append((pdu, origin))
+ return
+
+ # If we're not in the room just ditch the event entirely. This is
+ # probably an old server that has come back and thinks we're still in
+ # the room (or we've been rejoined to the room by a state reset).
+ #
+ # Note that if we were never in the room then we would have already
+ # dropped the event, since we wouldn't know the room version.
+ is_in_room = await self._event_auth_handler.check_host_in_room(
+ room_id, self._server_name
+ )
+ if not is_in_room:
+ logger.info(
+ "Ignoring PDU from %s as we're not in the room",
+ origin,
+ )
+ return None
+
+ # Check that the event passes auth based on the state at the event. This is
+ # done for events that are to be added to the timeline (non-outliers).
+ #
+ # Get missing pdus if necessary:
+ # - Fetching any missing prev events to fill in gaps in the graph
+ # - Fetching state if we have a hole in the graph
+ if not pdu.internal_metadata.is_outlier():
+ prevs = set(pdu.prev_event_ids())
+ seen = await self._store.have_events_in_timeline(prevs)
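+            # (have_events_in_timeline only returns those of the given events
+            # which we have persisted as non-outliers, i.e. in the timeline)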
+ missing_prevs = prevs - seen
+
+ if missing_prevs:
+ # We only backfill backwards to the min depth.
+ min_depth = await self.get_min_depth_for_context(pdu.room_id)
+ logger.debug("min_depth: %d", min_depth)
+
+ if min_depth is not None and pdu.depth > min_depth:
+ # If we're missing stuff, ensure we only fetch stuff one
+ # at a time.
+ logger.info(
+ "Acquiring room lock to fetch %d missing prev_events: %s",
+ len(missing_prevs),
+ shortstr(missing_prevs),
+ )
+ with (await self._room_pdu_linearizer.queue(pdu.room_id)):
+ logger.info(
+ "Acquired room lock to fetch %d missing prev_events",
+ len(missing_prevs),
+ )
+
+ try:
+ await self._get_missing_events_for_pdu(
+ origin, pdu, prevs, min_depth
+ )
+ except Exception as e:
+ raise Exception(
+ "Error fetching missing prev_events for %s: %s"
+ % (event_id, e)
+ ) from e
+
+ # Update the set of things we've seen after trying to
+ # fetch the missing stuff
+ seen = await self._store.have_events_in_timeline(prevs)
+ missing_prevs = prevs - seen
+
+ if not missing_prevs:
+ logger.info("Found all missing prev_events")
+
+ if missing_prevs:
+ # since this event was pushed to us, it is possible for it to
+ # become the only forward-extremity in the room, and we would then
+ # trust its state to be the state for the whole room. This is very
+ # bad. Further, if the event was pushed to us, there is no excuse
+ # for us not to have all the prev_events. (XXX: apart from
+ # min_depth?)
+ #
+ # We therefore reject any such events.
+ logger.warning(
+ "Rejecting: failed to fetch %d prev events: %s",
+ len(missing_prevs),
+ shortstr(missing_prevs),
+ )
+ raise FederationError(
+ "ERROR",
+ 403,
+ (
+ "Your server isn't divulging details about prev_events "
+ "referenced in this event."
+ ),
+ affected=pdu.event_id,
+ )
+
+ await self._process_received_pdu(origin, pdu, state=None)
+
+ @log_function
+ async def on_send_membership_event(
+ self, origin: str, event: EventBase
+ ) -> Tuple[EventBase, EventContext]:
+ """
+ We have received a join/leave/knock event for a room via send_join/leave/knock.
+
+ Verify that event and send it into the room on the remote homeserver's behalf.
+
+ This is quite similar to on_receive_pdu, with the following principal
+ differences:
+ * only membership events are permitted (and only events with
+ sender==state_key -- ie, no kicks or bans)
+ * *We* send out the event on behalf of the remote server.
+ * We enforce the membership restrictions of restricted rooms.
+ * Rejected events result in an exception rather than being stored.
+
+ There are also other differences, however it is not clear if these are by
+ design or omission. In particular, we do not attempt to backfill any missing
+ prev_events.
+
+ Args:
+ origin: The homeserver of the remote (joining/invited/knocking) user.
+ event: The member event that has been signed by the remote homeserver.
+
+ Returns:
+ The event and context of the event after inserting it into the room graph.
+
+ Raises:
+ SynapseError if the event is not accepted into the room
+ """
+ logger.debug(
+ "on_send_membership_event: Got event: %s, signatures: %s",
+ event.event_id,
+ event.signatures,
+ )
+
+ if get_domain_from_id(event.sender) != origin:
+ logger.info(
+ "Got send_membership request for user %r from different origin %s",
+ event.sender,
+ origin,
+ )
+ raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
+
+ if event.sender != event.state_key:
+ raise SynapseError(400, "state_key and sender must match", Codes.BAD_JSON)
+
+ assert not event.internal_metadata.outlier
+
+ # Send this event on behalf of the other server.
+ #
+        # The remote server isn't a full participant in the room at this point,
+        # so it may not have an up-to-date list of the other homeservers
+        # participating in the room; we therefore send the event on its behalf.
+ event.internal_metadata.send_on_behalf_of = origin
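+        # (_check_event_auth below spots this flag and precalculates the
+        # joined hosts, since we will be sending the event over federation)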
+
+ context = await self._state_handler.compute_event_context(event)
+ context = await self._check_event_auth(origin, event, context)
+ if context.rejected:
+ raise SynapseError(
+ 403, f"{event.membership} event was rejected", Codes.FORBIDDEN
+ )
+
+ # for joins, we need to check the restrictions of restricted rooms
+ if event.membership == Membership.JOIN:
+ await self.check_join_restrictions(context, event)
+
+ # for knock events, we run the third-party event rules. It's not entirely clear
+ # why we don't do this for other sorts of membership events.
+ if event.membership == Membership.KNOCK:
+ event_allowed, _ = await self._third_party_event_rules.check_event_allowed(
+ event, context
+ )
+ if not event_allowed:
+ logger.info("Sending of knock %s forbidden by third-party rules", event)
+ raise SynapseError(
+ 403, "This event is not allowed in this context", Codes.FORBIDDEN
+ )
+
+ # all looks good, we can persist the event.
+ await self._run_push_actions_and_persist_event(event, context)
+ return event, context
+
+ async def check_join_restrictions(
+ self, context: EventContext, event: EventBase
+ ) -> None:
+ """Check that restrictions in restricted join rules are matched
+
+ Called when we receive a join event via send_join.
+
+ Raises an auth error if the restrictions are not matched.
+ """
+ prev_state_ids = await context.get_prev_state_ids()
+
+ # Check if the user is already in the room or invited to the room.
+ user_id = event.state_key
+ prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
+ prev_member_event = None
+ if prev_member_event_id:
+ prev_member_event = await self._store.get_event(prev_member_event_id)
+
+ # Check if the member should be allowed access via membership in a space.
+ await self._event_auth_handler.check_restricted_join_rules(
+ prev_state_ids,
+ event.room_version,
+ user_id,
+ prev_member_event,
+ )
+
+ @log_function
+ async def backfill(
+ self, dest: str, room_id: str, limit: int, extremities: List[str]
+ ) -> None:
+ """Trigger a backfill request to `dest` for the given `room_id`
+
+        This will attempt to get more events from the remote. If the other side
+        has no new events to offer, this will do nothing.
+
+        As the events are received, we check their signatures, and also do some
+        sanity-checking on them. If any of the backfilled events are invalid,
+        this method raises a SynapseError.
+
+ We might also raise an InvalidResponseError if the response from the remote
+ server is just bogus.
+
+ TODO: make this more useful to distinguish failures of the remote
+ server from invalid events (there is probably no point in trying to
+ re-fetch invalid events from every other HS in the room.)
+ """
+ if dest == self._server_name:
+ raise SynapseError(400, "Can't backfill from self.")
+
+ events = await self._federation_client.backfill(
+ dest, room_id, limit=limit, extremities=extremities
+ )
+
+ if not events:
+ return
+
+ # if there are any events in the wrong room, the remote server is buggy and
+ # should not be trusted.
+ for ev in events:
+ if ev.room_id != room_id:
+ raise InvalidResponseError(
+ f"Remote server {dest} returned event {ev.event_id} which is in "
+ f"room {ev.room_id}, when we were backfilling in {room_id}"
+ )
+
+ await self._process_pulled_events(dest, events, backfilled=True)
+
+ async def _get_missing_events_for_pdu(
+ self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int
+ ) -> None:
+ """
+ Args:
+ origin: Origin of the pdu. Will be called to get the missing events
+ pdu: received pdu
+ prevs: List of event ids which we are missing
+ min_depth: Minimum depth of events to return.
+ """
+
+ room_id = pdu.room_id
+ event_id = pdu.event_id
+
+ seen = await self._store.have_events_in_timeline(prevs)
+
+ if not prevs - seen:
+ return
+
+ latest_list = await self._store.get_latest_event_ids_in_room(room_id)
+
+ # We add the prev events that we have seen to the latest
+ # list to ensure the remote server doesn't give them to us
+ latest = set(latest_list)
+ latest |= seen
+
+ logger.info(
+ "Requesting missing events between %s and %s",
+ shortstr(latest),
+ event_id,
+ )
+
+ # XXX: we set timeout to 10s to help workaround
+ # https://github.com/matrix-org/synapse/issues/1733.
+ # The reason is to avoid holding the linearizer lock
+ # whilst processing inbound /send transactions, causing
+ # FDs to stack up and block other inbound transactions
+ # which empirically can currently take up to 30 minutes.
+ #
+ # N.B. this explicitly disables retry attempts.
+ #
+ # N.B. this also increases our chances of falling back to
+ # fetching fresh state for the room if the missing event
+ # can't be found, which slightly reduces our security.
+ # it may also increase our DAG extremity count for the room,
+ # causing additional state resolution? See #1760.
+ # However, fetching state doesn't hold the linearizer lock
+ # apparently.
+ #
+ # see https://github.com/matrix-org/synapse/pull/1744
+ #
+ # ----
+ #
+ # Update richvdh 2018/09/18: There are a number of problems with timing this
+ # request out aggressively on the client side:
+ #
+ # - it plays badly with the server-side rate-limiter, which starts tarpitting you
+ # if you send too many requests at once, so you end up with the server carefully
+ # working through the backlog of your requests, which you have already timed
+ # out.
+ #
+ # - for this request in particular, we now (as of
+ # https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the
+        #   server can't produce a plausible-looking set of prev_events - so we become
+ # much more likely to reject the event.
+ #
+ # - contrary to what it says above, we do *not* fall back to fetching fresh state
+ # for the room if get_missing_events times out. Rather, we give up processing
+ # the PDU whose prevs we are missing, which then makes it much more likely that
+ # we'll end up back here for the *next* PDU in the list, which exacerbates the
+ # problem.
+ #
+ # - the aggressive 10s timeout was introduced to deal with incoming federation
+ # requests taking 8 hours to process. It's not entirely clear why that was going
+ # on; certainly there were other issues causing traffic storms which are now
+ # resolved, and I think in any case we may be more sensible about our locking
+ # now. We're *certainly* more sensible about our logging.
+ #
+ # All that said: Let's try increasing the timeout to 60s and see what happens.
+
+ try:
+ missing_events = await self._federation_client.get_missing_events(
+ origin,
+ room_id,
+ earliest_events_ids=list(latest),
+ latest_events=[pdu],
+ limit=10,
+ min_depth=min_depth,
+ timeout=60000,
+ )
+ except (RequestSendFailed, HttpResponseException, NotRetryingDestination) as e:
+ # We failed to get the missing events, but since we need to handle
+ # the case of `get_missing_events` not returning the necessary
+ # events anyway, it is safe to simply log the error and continue.
+ logger.warning("Failed to get prev_events: %s", e)
+ return
+
+ logger.info("Got %d prev_events", len(missing_events))
+ await self._process_pulled_events(origin, missing_events, backfilled=False)
+
+ async def _process_pulled_events(
+ self, origin: str, events: Iterable[EventBase], backfilled: bool
+ ) -> None:
+ """Process a batch of events we have pulled from a remote server
+
+ Pulls in any events required to auth the events, persists the received events,
+ and notifies clients, if appropriate.
+
+ Assumes the events have already had their signatures and hashes checked.
+
+        Args:
+ origin: The server we received these events from
+ events: The received events.
+ backfilled: True if this is part of a historical batch of events (inhibits
+ notification to clients, and validation of device keys.)
+ """
+
+ # We want to sort these by depth so we process them and
+ # tell clients about them in order.
+ sorted_events = sorted(events, key=lambda x: x.depth)
+
+ for ev in sorted_events:
+ with nested_logging_context(ev.event_id):
+ await self._process_pulled_event(origin, ev, backfilled=backfilled)
+
+ async def _process_pulled_event(
+ self, origin: str, event: EventBase, backfilled: bool
+ ) -> None:
+ """Process a single event that we have pulled from a remote server
+
+ Pulls in any events required to auth the event, persists the received event,
+ and notifies clients, if appropriate.
+
+ Assumes the event has already had its signatures and hashes checked.
+
+        This is broadly equivalent to on_receive_pdu, but applies somewhat different
+ logic in the case that we are missing prev_events (in particular, it just
+ requests the state at that point, rather than triggering a get_missing_events) -
+ so is appropriate when we have pulled the event from a remote server, rather
+ than having it pushed to us.
+
+        Args:
+            origin: The server we received this event from
+            event: The received event
+ backfilled: True if this is part of a historical batch of events (inhibits
+ notification to clients, and validation of device keys.)
+ """
+ logger.info("Processing pulled event %s", event)
+
+ # these should not be outliers.
+ assert not event.internal_metadata.is_outlier()
+
+ event_id = event.event_id
+
+ existing = await self._store.get_event(
+ event_id, allow_none=True, allow_rejected=True
+ )
+ if existing:
+ if not existing.internal_metadata.is_outlier():
+ logger.info(
+ "Ignoring received event %s which we have already seen",
+ event_id,
+ )
+ return
+ logger.info("De-outliering event %s", event_id)
+
+ try:
+ self._sanity_check_event(event)
+ except SynapseError as err:
+ logger.warning("Event %s failed sanity check: %s", event_id, err)
+ return
+
+ try:
+ state = await self._resolve_state_at_missing_prevs(origin, event)
+ await self._process_received_pdu(
+ origin, event, state=state, backfilled=backfilled
+ )
+ except FederationError as e:
+ if e.code == 403:
+ logger.warning("Pulled event %s failed history check.", event_id)
+ else:
+ raise
+
+ async def _resolve_state_at_missing_prevs(
+ self, dest: str, event: EventBase
+ ) -> Optional[Iterable[EventBase]]:
+ """Calculate the state at an event with missing prev_events.
+
+ This is used when we have pulled a batch of events from a remote server, and
+ still don't have all the prev_events.
+
+ If we already have all the prev_events for `event`, this method does nothing.
+
+ Otherwise, the missing prevs become new backwards extremities, and we fall back
+ to asking the remote server for the state after each missing `prev_event`,
+ and resolving across them.
+
+ That's ok provided we then resolve the state against other bits of the DAG
+ before using it - in other words, that the received event `event` is not going
+ to become the only forwards_extremity in the room (which will ensure that you
+ can't just take over a room by sending an event, withholding its prev_events,
+ and declaring yourself to be an admin in the subsequent state request).
+
+ In other words: we should only call this method if `event` has been *pulled*
+ as part of a batch of missing prev events, or similar.
+
+        Args:
+ dest: the remote server to ask for state at the missing prevs. Typically,
+ this will be the server we got `event` from.
+ event: an event to check for missing prevs.
+
+ Returns:
+ if we already had all the prev events, `None`. Otherwise, returns a list of
+ the events in the state at `event`.
+ """
+ room_id = event.room_id
+ event_id = event.event_id
+
+ prevs = set(event.prev_event_ids())
+ seen = await self._store.have_events_in_timeline(prevs)
+ missing_prevs = prevs - seen
+
+ if not missing_prevs:
+ return None
+
+ logger.info(
+ "Event %s is missing prev_events %s: calculating state for a "
+ "backwards extremity",
+ event_id,
+ shortstr(missing_prevs),
+ )
+ # Calculate the state after each of the previous events, and
+ # resolve them to find the correct state at the current event.
+ event_map = {event_id: event}
+ try:
+ # Get the state of the events we know about
+ ours = await self._state_store.get_state_groups_ids(room_id, seen)
+
+ # state_maps is a list of mappings from (type, state_key) to event_id
+ state_maps: List[StateMap[str]] = list(ours.values())
+
+ # we don't need this any more, let's delete it.
+ del ours
+
+ # Ask the remote server for the states we don't
+ # know about
+ for p in missing_prevs:
+ logger.info("Requesting state after missing prev_event %s", p)
+
+ with nested_logging_context(p):
+ # note that if any of the missing prevs share missing state or
+ # auth events, the requests to fetch those events are deduped
+ # by the get_pdu_cache in federation_client.
+ remote_state = await self._get_state_after_missing_prev_event(
+ dest, room_id, p
+ )
+
+ remote_state_map = {
+ (x.type, x.state_key): x.event_id for x in remote_state
+ }
+ state_maps.append(remote_state_map)
+
+ for x in remote_state:
+ event_map[x.event_id] = x
+
+ room_version = await self._store.get_room_version_id(room_id)
+ state_map = await self._state_resolution_handler.resolve_events_with_store(
+ room_id,
+ room_version,
+ state_maps,
+ event_map,
+ state_res_store=StateResolutionStore(self._store),
+ )
+
+ # We need to give _process_received_pdu the actual state events
+ # rather than event ids, so generate that now.
+
+ # First though we need to fetch all the events that are in
+ # state_map, so we can build up the state below.
+ evs = await self._store.get_events(
+ list(state_map.values()),
+ get_prev_content=False,
+ redact_behaviour=EventRedactBehaviour.AS_IS,
+ )
+ event_map.update(evs)
+
+ state = [event_map[e] for e in state_map.values()]
+ except Exception:
+ logger.warning(
+ "Error attempting to resolve state at missing prev_events",
+ exc_info=True,
+ )
+ raise FederationError(
+ "ERROR",
+ 403,
+ "We can't get valid state history.",
+ affected=event_id,
+ )
+ return state
+
+ async def _get_state_after_missing_prev_event(
+ self,
+ destination: str,
+ room_id: str,
+ event_id: str,
+ ) -> List[EventBase]:
+ """Requests all of the room state at a given event from a remote homeserver.
+
+ Args:
+ destination: The remote homeserver to query for the state.
+ room_id: The id of the room we're interested in.
+ event_id: The id of the event we want the state at.
+
+ Returns:
+ A list of events in the state, including the event itself
+ """
+ (
+ state_event_ids,
+ auth_event_ids,
+ ) = await self._federation_client.get_room_state_ids(
+ destination, room_id, event_id=event_id
+ )
+
+ logger.debug(
+ "state_ids returned %i state events, %i auth events",
+ len(state_event_ids),
+ len(auth_event_ids),
+ )
+
+ # start by just trying to fetch the events from the store
+ desired_events = set(state_event_ids)
+ desired_events.add(event_id)
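+        # (we need the event itself so that, below, we can check whether it is
+        # a state event and whether it was rejected)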
+ logger.debug("Fetching %i events from cache/store", len(desired_events))
+ fetched_events = await self._store.get_events(
+ desired_events, allow_rejected=True
+ )
+
+ missing_desired_events = desired_events - fetched_events.keys()
+ logger.debug(
+ "We are missing %i events (got %i)",
+ len(missing_desired_events),
+ len(fetched_events),
+ )
+
+ # We probably won't need most of the auth events, so let's just check which
+ # we have for now, rather than thrashing the event cache with them all
+ # unnecessarily.
+
+ # TODO: we probably won't actually need all of the auth events, since we
+ # already have a bunch of the state events. It would be nice if the
+ # federation api gave us a way of finding out which we actually need.
+
+ missing_auth_events = set(auth_event_ids) - fetched_events.keys()
+ missing_auth_events.difference_update(
+ await self._store.have_seen_events(room_id, missing_auth_events)
+ )
+ logger.debug("We are also missing %i auth events", len(missing_auth_events))
+
+ missing_events = missing_desired_events | missing_auth_events
+ logger.debug("Fetching %i events from remote", len(missing_events))
+ await self._get_events_and_persist(
+ destination=destination, room_id=room_id, events=missing_events
+ )
+
+ # we need to make sure we re-load from the database to get the rejected
+ # state correct.
+ fetched_events.update(
+ await self._store.get_events(missing_desired_events, allow_rejected=True)
+ )
+
+ # check for events which were in the wrong room.
+ #
+ # this can happen if a remote server claims that the state or
+ # auth_events at an event in room A are actually events in room B
+
+ bad_events = [
+ (event_id, event.room_id)
+ for event_id, event in fetched_events.items()
+ if event.room_id != room_id
+ ]
+
+ for bad_event_id, bad_room_id in bad_events:
+ # This is a bogus situation, but since we may only discover it a long time
+ # after it happened, we try our best to carry on, by just omitting the
+ # bad events from the returned state set.
+ logger.warning(
+ "Remote server %s claims event %s in room %s is an auth/state "
+ "event in room %s",
+ destination,
+ bad_event_id,
+ bad_room_id,
+ room_id,
+ )
+
+ del fetched_events[bad_event_id]
+
+ # if we couldn't get the prev event in question, that's a problem.
+ remote_event = fetched_events.get(event_id)
+ if not remote_event:
+ raise Exception("Unable to get missing prev_event %s" % (event_id,))
+
+ # missing state at that event is a warning, not a blocker
+ # XXX: this doesn't sound right? it means that we'll end up with incomplete
+ # state.
+ failed_to_fetch = desired_events - fetched_events.keys()
+ if failed_to_fetch:
+ logger.warning(
+ "Failed to fetch missing state events for %s %s",
+ event_id,
+ failed_to_fetch,
+ )
+
+ remote_state = [
+ fetched_events[e_id] for e_id in state_event_ids if e_id in fetched_events
+ ]
+
+ if remote_event.is_state() and remote_event.rejected_reason is None:
+ remote_state.append(remote_event)
+
+ return remote_state
+
+ async def _process_received_pdu(
+ self,
+ origin: str,
+ event: EventBase,
+ state: Optional[Iterable[EventBase]],
+ backfilled: bool = False,
+ ) -> None:
+ """Called when we have a new pdu. We need to do auth checks and put it
+ through the StateHandler.
+
+ Args:
+ origin: server sending the event
+
+ event: event to be persisted
+
+ state: Normally None, but if we are handling a gap in the graph
+ (ie, we are missing one or more prev_events), the resolved state at the
+ event
+
+ backfilled: True if this is part of a historical batch of events (inhibits
+ notification to clients, and validation of device keys.)
+ """
+ logger.debug("Processing event: %s", event)
+
+ try:
+ context = await self._state_handler.compute_event_context(
+ event, old_state=state
+ )
+ await self._auth_and_persist_event(
+ origin, event, context, state=state, backfilled=backfilled
+ )
+ except AuthError as e:
+ raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
+
+ if backfilled:
+ return
+
+ # For encrypted messages we check that we know about the sending device,
+ # if we don't then we mark the device cache for that user as stale.
+ if event.type == EventTypes.Encrypted:
+ device_id = event.content.get("device_id")
+ sender_key = event.content.get("sender_key")
+
+ cached_devices = await self._store.get_cached_devices_for_user(event.sender)
+
+ resync = False # Whether we should resync device lists.
+
+ device = None
+ if device_id is not None:
+ device = cached_devices.get(device_id)
+ if device is None:
+ logger.info(
+ "Received event from remote device not in our cache: %s %s",
+ event.sender,
+ device_id,
+ )
+ resync = True
+
+ # We also check if the `sender_key` matches what we expect.
+ if sender_key is not None:
+ # Figure out what sender key we're expecting. If we know the
+ # device and recognize the algorithm then we can work out the
+ # exact key to expect. Otherwise check it matches any key we
+ # have for that device.
+
+ current_keys: Container[str] = []
+
+ if device:
+ keys = device.get("keys", {}).get("keys", {})
+
+ if (
+ event.content.get("algorithm")
+ == RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2
+ ):
+ # For this algorithm we expect a curve25519 key.
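+                        # (illustrative: key ids take the form
+                        # "<algorithm>:<device_id>", so for a hypothetical
+                        # device id "JLAFKJWSCS" this is "curve25519:JLAFKJWSCS")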
+ key_name = "curve25519:%s" % (device_id,)
+ current_keys = [keys.get(key_name)]
+ else:
+                        # We don't understand the algorithm, so we just check
+                        # that it matches any key we have for the device.
+ current_keys = keys.values()
+ elif device_id:
+ # We don't have any keys for the device ID.
+ pass
+ else:
+ # The event didn't include a device ID, so we just look for
+ # keys across all devices.
+ current_keys = [
+ key
+ for device in cached_devices.values()
+ for key in device.get("keys", {}).get("keys", {}).values()
+ ]
+
+ # We now check that the sender key matches (one of) the expected
+ # keys.
+ if sender_key not in current_keys:
+ logger.info(
+ "Received event from remote device with unexpected sender key: %s %s: %s",
+ event.sender,
+ device_id or "<no device_id>",
+ sender_key,
+ )
+ resync = True
+
+ if resync:
+ run_as_background_process(
+ "resync_device_due_to_pdu",
+ self._resync_device,
+ event.sender,
+ )
+
+ await self._handle_marker_event(origin, event)
+
+ async def _resync_device(self, sender: str) -> None:
+ """We have detected that the device list for the given user may be out
+ of sync, so we try and resync them.
+ """
+
+ try:
+ await self._store.mark_remote_user_device_cache_as_stale(sender)
+
+ # Immediately attempt a resync in the background
+ if self._config.worker_app:
+ await self._user_device_resync(user_id=sender)
+ else:
+ await self._device_list_updater.user_device_resync(sender)
+ except Exception:
+ logger.exception("Failed to resync device for %s", sender)
+
+    async def _handle_marker_event(
+        self, origin: str, marker_event: EventBase
+    ) -> None:
+ """Handles backfilling the insertion event when we receive a marker
+ event that points to one.
+
+ Args:
+ origin: Origin of the event. Will be called to get the insertion event
+ marker_event: The event to process
+ """
+
+ if marker_event.type != EventTypes.MSC2716_MARKER:
+ # Not a marker event
+ return
+
+ if marker_event.rejected_reason is not None:
+ # Rejected event
+ return
+
+ # Skip processing a marker event if the room version doesn't
+ # support it or the event is not from the room creator.
+ room_version = await self._store.get_room_version(marker_event.room_id)
+        create_event = await self._store.get_create_event_for_room(
+            marker_event.room_id
+        )
+ room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
+ if (
+ not room_version.msc2716_historical
+ or not self._config.experimental.msc2716_enabled
+ or marker_event.sender != room_creator
+ ):
+ return
+
+ logger.debug("_handle_marker_event: received %s", marker_event)
+
+ insertion_event_id = marker_event.content.get(
+ EventContentFields.MSC2716_MARKER_INSERTION
+ )
+
+ if insertion_event_id is None:
+ # Nothing to retrieve then (invalid marker)
+ return
+
+ logger.debug(
+ "_handle_marker_event: backfilling insertion event %s", insertion_event_id
+ )
+
+ await self._get_events_and_persist(
+ origin,
+ marker_event.room_id,
+ [insertion_event_id],
+ )
+
+ insertion_event = await self._store.get_event(
+ insertion_event_id, allow_none=True
+ )
+ if insertion_event is None:
+ logger.warning(
+ "_handle_marker_event: server %s didn't return insertion event %s for marker %s",
+ origin,
+ insertion_event_id,
+ marker_event.event_id,
+ )
+ return
+
+ logger.debug(
+ "_handle_marker_event: succesfully backfilled insertion event %s from marker event %s",
+ insertion_event,
+ marker_event,
+ )
+
+ await self._store.insert_insertion_extremity(
+ insertion_event_id, marker_event.room_id
+ )
+
+ logger.debug(
+ "_handle_marker_event: insertion extremity added for %s from marker event %s",
+ insertion_event,
+ marker_event,
+ )
+
+ async def _get_events_and_persist(
+ self, destination: str, room_id: str, events: Iterable[str]
+ ) -> None:
+ """Fetch the given events from a server, and persist them as outliers.
+
+ This function *does not* recursively get missing auth events of the
+ newly fetched events. Callers must include in the `events` argument
+ any missing events from the auth chain.
+
+ Logs a warning if we can't find the given event.
+ """
+
+ room_version = await self._store.get_room_version(room_id)
+
+ event_map: Dict[str, EventBase] = {}
+
+        async def get_event(event_id: str) -> None:
+ with nested_logging_context(event_id):
+ try:
+ event = await self._federation_client.get_pdu(
+ [destination],
+ event_id,
+ room_version,
+ outlier=True,
+ )
+ if event is None:
+ logger.warning(
+ "Server %s didn't return event %s",
+ destination,
+ event_id,
+ )
+ return
+
+ event_map[event.event_id] = event
+
+ except Exception as e:
+ logger.warning(
+ "Error fetching missing state/auth event %s: %s %s",
+ event_id,
+ type(e),
+ e,
+ )
+
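+        # fetch the events from the remote, at most five at a time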
+ await concurrently_execute(get_event, events, 5)
+
+ # Make a map of auth events for each event. We do this after fetching
+ # all the events as some of the events' auth events will be in the list
+ # of requested events.
+
+ auth_events = [
+ aid
+ for event in event_map.values()
+ for aid in event.auth_event_ids()
+ if aid not in event_map
+ ]
+ persisted_events = await self._store.get_events(
+ auth_events,
+ allow_rejected=True,
+ )
+
+ event_infos = []
+ for event in event_map.values():
+ auth = {}
+ for auth_event_id in event.auth_event_ids():
+ ae = persisted_events.get(auth_event_id) or event_map.get(auth_event_id)
+ if ae:
+ auth[(ae.type, ae.state_key)] = ae
+ else:
+ logger.info("Missing auth event %s", auth_event_id)
+
+ event_infos.append(_NewEventInfo(event, auth))
+
+ if event_infos:
+ await self._auth_and_persist_events(
+ destination,
+ room_id,
+ event_infos,
+ )
+
+ async def _auth_and_persist_events(
+ self,
+ origin: str,
+ room_id: str,
+ event_infos: Collection[_NewEventInfo],
+ ) -> None:
+ """Creates the appropriate contexts and persists events. The events
+ should not depend on one another, e.g. this should be used to persist
+ a bunch of outliers, but not a chunk of individual events that depend
+ on each other for state calculations.
+
+ Notifies about the events where appropriate.
+ """
+
+ if not event_infos:
+ return
+
+ async def prep(ev_info: _NewEventInfo):
+ event = ev_info.event
+ with nested_logging_context(suffix=event.event_id):
+ res = await self._state_handler.compute_event_context(event)
+ res = await self._check_event_auth(
+ origin,
+ event,
+ res,
+ claimed_auth_event_map=ev_info.claimed_auth_event_map,
+ )
+ return res
+
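+        # fire off the prep work for each event and wait for them all, using
+        # run_in_background/make_deferred_yieldable to keep the logcontexts
+        # straight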
+ contexts = await make_deferred_yieldable(
+ defer.gatherResults(
+ [run_in_background(prep, ev_info) for ev_info in event_infos],
+ consumeErrors=True,
+ )
+ )
+
+ await self.persist_events_and_notify(
+ room_id,
+ [
+ (ev_info.event, context)
+ for ev_info, context in zip(event_infos, contexts)
+ ],
+ )
+
+ async def _auth_and_persist_event(
+ self,
+ origin: str,
+ event: EventBase,
+ context: EventContext,
+ state: Optional[Iterable[EventBase]] = None,
+ claimed_auth_event_map: Optional[StateMap[EventBase]] = None,
+ backfilled: bool = False,
+ ) -> None:
+ """
+ Process an event by performing auth checks and then persisting to the database.
+
+ Args:
+ origin: The host the event originates from.
+ event: The event itself.
+ context:
+ The event context.
+
+ state:
+ The state events used to check the event for soft-fail. If this is
+ not provided the current state events will be used.
+
+ claimed_auth_event_map:
+ A map of (type, state_key) => event for the event's claimed auth_events.
+ Possibly incomplete, and possibly including events that are not yet
+ persisted, or authed, or in the right room.
+
+ Only populated where we may not already have persisted these events -
+ for example, when populating outliers.
+
+ backfilled: True if the event was backfilled.
+ """
+ context = await self._check_event_auth(
+ origin,
+ event,
+ context,
+ state=state,
+ claimed_auth_event_map=claimed_auth_event_map,
+ backfilled=backfilled,
+ )
+
+ await self._run_push_actions_and_persist_event(event, context, backfilled)
+
+ async def _check_event_auth(
+ self,
+ origin: str,
+ event: EventBase,
+ context: EventContext,
+ state: Optional[Iterable[EventBase]] = None,
+ claimed_auth_event_map: Optional[StateMap[EventBase]] = None,
+ backfilled: bool = False,
+ ) -> EventContext:
+ """
+ Checks whether an event should be rejected (for failing auth checks).
+
+ Args:
+ origin: The host the event originates from.
+ event: The event itself.
+ context:
+ The event context.
+
+ state:
+ The state events used to check the event for soft-fail. If this is
+ not provided the current state events will be used.
+
+ claimed_auth_event_map:
+ A map of (type, state_key) => event for the event's claimed auth_events.
+ Possibly incomplete, and possibly including events that are not yet
+ persisted, or authed, or in the right room.
+
+ Only populated where we may not already have persisted these events -
+ for example, when populating outliers, or the state for a backwards
+ extremity.
+
+ backfilled: True if the event was backfilled.
+
+ Returns:
+ The updated context object.
+ """
+ room_version = await self._store.get_room_version_id(event.room_id)
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+
+ if claimed_auth_event_map:
+ # if we have a copy of the auth events from the event, use that as the
+ # basis for auth.
+ auth_events = claimed_auth_event_map
+ else:
+ # otherwise, we calculate what the auth events *should* be, and use that
+ prev_state_ids = await context.get_prev_state_ids()
+ auth_events_ids = self._event_auth_handler.compute_auth_events(
+ event, prev_state_ids, for_verification=True
+ )
+ auth_events_x = await self._store.get_events(auth_events_ids)
+ auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()}
+
+ try:
+ (
+ context,
+ auth_events_for_auth,
+ ) = await self._update_auth_events_and_context_for_auth(
+ origin, event, context, auth_events
+ )
+ except Exception:
+            # We don't really mind if the above fails, so let's not fail
+ # processing if it does. However, it really shouldn't fail so
+ # let's still log as an exception since we'll still want to fix
+ # any bugs.
+ logger.exception(
+ "Failed to double check auth events for %s with remote. "
+ "Ignoring failure and continuing processing of event.",
+ event.event_id,
+ )
+ auth_events_for_auth = auth_events
+
+ try:
+ event_auth.check(room_version_obj, event, auth_events=auth_events_for_auth)
+ except AuthError as e:
+ logger.warning("Failed auth resolution for %r because %s", event, e)
+ context.rejected = RejectedReason.AUTH_ERROR
+
+ if not context.rejected:
+ await self._check_for_soft_fail(event, state, backfilled, origin=origin)
+ await self._maybe_kick_guest_users(event)
+
+        # If we are going to send this event over federation we precalculate
+ # the joined hosts.
+ if event.internal_metadata.get_send_on_behalf_of():
+ await self._event_creation_handler.cache_joined_hosts_for_event(
+ event, context
+ )
+
+ return context
+
+ async def _maybe_kick_guest_users(self, event: EventBase) -> None:
+ if event.type != EventTypes.GuestAccess:
+ return
+
+ guest_access = event.content.get(EventContentFields.GUEST_ACCESS)
+ if guest_access == GuestAccess.CAN_JOIN:
+ return
+
+ current_state_map = await self._state_handler.get_current_state(event.room_id)
+ current_state = list(current_state_map.values())
+ await self._get_room_member_handler().kick_guest_users(current_state)
+
+ async def _check_for_soft_fail(
+ self,
+ event: EventBase,
+ state: Optional[Iterable[EventBase]],
+ backfilled: bool,
+ origin: str,
+ ) -> None:
+ """Checks if we should soft fail the event; if so, marks the event as
+ such.
+
+ Args:
+ event
+ state: The state at the event if we don't have all the event's prev events
+ backfilled: Whether the event is from backfill
+ origin: The host the event originates from.
+ """
+ # For new (non-backfilled and non-outlier) events we check if the event
+ # passes auth based on the current state. If it doesn't then we
+ # "soft-fail" the event.
+ if backfilled or event.internal_metadata.is_outlier():
+ return
+
+ extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id)
+ extrem_ids = set(extrem_ids_list)
+ prev_event_ids = set(event.prev_event_ids())
+
+ if extrem_ids == prev_event_ids:
+ # If they're the same then the current state is the same as the
+ # state at the event, so no point rechecking auth for soft fail.
+ return
+
+ room_version = await self._store.get_room_version_id(event.room_id)
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+
+ # Calculate the "current state".
+ if state is not None:
+ # If we're explicitly given the state then we won't have all the
+ # prev events, and so we have a gap in the graph. In this case
+ # we want to be a little careful as we might have been down for
+ # a while and have an incorrect view of the current state,
+ # however we still want to do checks as gaps are easy to
+ # maliciously manufacture.
+ #
+ # So we use a "current state" that is actually a state
+ # resolution across the current forward extremities and the
+ # given state at the event. This should correctly handle cases
+ # like bans, especially with state res v2.
+
+ state_sets_d = await self._state_store.get_state_groups(
+ event.room_id, extrem_ids
+ )
+ state_sets: List[Iterable[EventBase]] = list(state_sets_d.values())
+ state_sets.append(state)
+ current_states = await self._state_handler.resolve_events(
+ room_version, state_sets, event
+ )
+ current_state_ids: StateMap[str] = {
+ k: e.event_id for k, e in current_states.items()
+ }
+ else:
+ current_state_ids = await self._state_handler.get_current_state_ids(
+ event.room_id, latest_event_ids=extrem_ids
+ )
+
+ logger.debug(
+ "Doing soft-fail check for %s: state %s",
+ event.event_id,
+ current_state_ids,
+ )
+
+ # Now check if event pass auth against said current state
+ auth_types = auth_types_for_event(room_version_obj, event)
+ current_state_ids_list = [
+ e for k, e in current_state_ids.items() if k in auth_types
+ ]
+
+ auth_events_map = await self._store.get_events(current_state_ids_list)
+ current_auth_events = {
+ (e.type, e.state_key): e for e in auth_events_map.values()
+ }
+
+ try:
+ event_auth.check(room_version_obj, event, auth_events=current_auth_events)
+ except AuthError as e:
+ logger.warning(
+ "Soft-failing %r (from %s) because %s",
+ event,
+ e,
+ origin,
+ extra={
+ "room_id": event.room_id,
+ "mxid": event.sender,
+ "hs": origin,
+ },
+ )
+ soft_failed_event_counter.inc()
+ event.internal_metadata.soft_failed = True
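+            # (soft-failed events are still persisted, but are not served to
+            # clients and are excluded from the forward extremities)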
+
+ async def _update_auth_events_and_context_for_auth(
+ self,
+ origin: str,
+ event: EventBase,
+ context: EventContext,
+ input_auth_events: StateMap[EventBase],
+ ) -> Tuple[EventContext, StateMap[EventBase]]:
+ """Helper for _check_event_auth. See there for docs.
+
+ Checks whether a given event has the expected auth events. If it
+ doesn't then we talk to the remote server to compare state to see if
+ we can come to a consensus (e.g. if one server missed some valid
+ state).
+
+ This attempts to resolve any potential divergence of state between
+ servers, but is not essential and so failures should not block further
+ processing of the event.
+
+ Args:
+            origin: The host the event originates from.
+            event: The event itself.
+            context: The event context.
+
+ input_auth_events:
+ Map from (event_type, state_key) to event
+
+ Normally, our calculated auth_events based on the state of the room
+ at the event's position in the DAG, though occasionally (eg if the
+ event is an outlier), may be the auth events claimed by the remote
+ server.
+
+ Returns:
+ updated context, updated auth event map
+ """
+ # take a copy of input_auth_events before we modify it.
+ auth_events: MutableStateMap[EventBase] = dict(input_auth_events)
+
+ event_auth_events = set(event.auth_event_ids())
+
+ # missing_auth is the set of the event's auth_events which we don't yet have
+ # in auth_events.
+ missing_auth = event_auth_events.difference(
+ e.event_id for e in auth_events.values()
+ )
+
+ # if we have missing events, we need to fetch those events from somewhere.
+ #
+ # we start by checking if they are in the store, and then try calling /event_auth/.
+ if missing_auth:
+ have_events = await self._store.have_seen_events(
+ event.room_id, missing_auth
+ )
+ logger.debug("Events %s are in the store", have_events)
+ missing_auth.difference_update(have_events)
+
+ if missing_auth:
+ # If we don't have all the auth events, we need to get them.
+ logger.info("auth_events contains unknown events: %s", missing_auth)
+ try:
+ try:
+ remote_auth_chain = await self._federation_client.get_event_auth(
+ origin, event.room_id, event.event_id
+ )
+ except RequestSendFailed as e1:
+ # The other side isn't around or doesn't implement the
+ # endpoint, so lets just bail out.
+ logger.info("Failed to get event auth from remote: %s", e1)
+ return context, auth_events
+
+ seen_remotes = await self._store.have_seen_events(
+ event.room_id, [e.event_id for e in remote_auth_chain]
+ )
+
+ for e in remote_auth_chain:
+ if e.event_id in seen_remotes:
+ continue
+
+ if e.event_id == event.event_id:
+ continue
+
+ try:
+ auth_ids = e.auth_event_ids()
+ auth = {
+ (e.type, e.state_key): e
+ for e in remote_auth_chain
+ if e.event_id in auth_ids or e.type == EventTypes.Create
+ }
+ e.internal_metadata.outlier = True
+
+ logger.debug(
+ "_check_event_auth %s missing_auth: %s",
+ event.event_id,
+ e.event_id,
+ )
+ missing_auth_event_context = (
+ await self._state_handler.compute_event_context(e)
+ )
+ await self._auth_and_persist_event(
+ origin,
+ e,
+ missing_auth_event_context,
+ claimed_auth_event_map=auth,
+ )
+
+ if e.event_id in event_auth_events:
+ auth_events[(e.type, e.state_key)] = e
+ except AuthError:
+ pass
+
+ except Exception:
+ logger.exception("Failed to get auth chain")
+
+ if event.internal_metadata.is_outlier():
+ # XXX: given that, for an outlier, we'll be working with the
+ # event's *claimed* auth events rather than those we calculated:
+ # (a) is there any point in this test, since different_auth below will
+ # obviously be empty
+ # (b) alternatively, why don't we do it earlier?
+ logger.info("Skipping auth_event fetch for outlier")
+ return context, auth_events
+
+ different_auth = event_auth_events.difference(
+ e.event_id for e in auth_events.values()
+ )
+
+ if not different_auth:
+ return context, auth_events
+
+ logger.info(
+ "auth_events refers to events which are not in our calculated auth "
+ "chain: %s",
+ different_auth,
+ )
+
+ # XXX: currently this checks for redactions but I'm not convinced that is
+ # necessary?
+ different_events = await self._store.get_events_as_list(different_auth)
+
+ for d in different_events:
+ if d.room_id != event.room_id:
+ logger.warning(
+ "Event %s refers to auth_event %s which is in a different room",
+ event.event_id,
+ d.event_id,
+ )
+
+ # don't attempt to resolve the claimed auth events against our own
+ # in this case: just use our own auth events.
+ #
+ # XXX: should we reject the event in this case? It feels like we should,
+ # but then shouldn't we also do so if we've failed to fetch any of the
+ # auth events?
+ return context, auth_events
+
+ # now we state-resolve between our own idea of the auth events, and the remote's
+ # idea of them.
+
+ local_state = auth_events.values()
+ remote_auth_events = dict(auth_events)
+ remote_auth_events.update({(d.type, d.state_key): d for d in different_events})
+ remote_state = remote_auth_events.values()
+
+ room_version = await self._store.get_room_version_id(event.room_id)
+ new_state = await self._state_handler.resolve_events(
+ room_version, (local_state, remote_state), event
+ )
+
+ logger.info(
+ "After state res: updating auth_events with new state %s",
+ {
+ (d.type, d.state_key): d.event_id
+ for d in new_state.values()
+ if auth_events.get((d.type, d.state_key)) != d
+ },
+ )
+
+ auth_events.update(new_state)
+
+ context = await self._update_context_for_auth_events(
+ event, context, auth_events
+ )
+
+ return context, auth_events
+
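The two set computations in the helper above, modelled in isolation (the event IDs are made up):

    # missing_auth: claimed auth events we hold nowhere; fetched first from
    # the store, then via /event_auth/. different_auth: whatever still
    # diverges afterwards, which triggers state resolution between the two
    # views of the auth events.
    claimed = {"$create", "$power_levels", "$sender_membership"}
    ours = {"$create", "$sender_membership"}

    missing_auth = claimed - ours
    assert missing_auth == {"$power_levels"}

    ours |= missing_auth  # suppose the fetch succeeded
    different_auth = claimed - ours
    assert different_auth == set()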
+ async def _update_context_for_auth_events(
+ self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
+ ) -> EventContext:
+ """Update the state_ids in an event context after auth event resolution,
+ storing the changes as a new state group.
+
+ Args:
+ event: The event we're handling the context for
+
+ context: initial event context
+
+ auth_events: Events to update in the event context.
+
+ Returns:
+ new event context
+ """
+ # exclude the state key of the new event from the current_state in the context.
+ if event.is_state():
+ event_key: Optional[Tuple[str, str]] = (event.type, event.state_key)
+ else:
+ event_key = None
+ state_updates = {
+ k: a.event_id for k, a in auth_events.items() if k != event_key
+ }
+
+ current_state_ids = await context.get_current_state_ids()
+ current_state_ids = dict(current_state_ids) # type: ignore
+
+ current_state_ids.update(state_updates)
+
+ prev_state_ids = await context.get_prev_state_ids()
+ prev_state_ids = dict(prev_state_ids)
+
+ prev_state_ids.update({k: a.event_id for k, a in auth_events.items()})
+
+ # create a new state group as a delta from the existing one.
+ prev_group = context.state_group
+ state_group = await self._state_store.store_state_group(
+ event.event_id,
+ event.room_id,
+ prev_group=prev_group,
+ delta_ids=state_updates,
+ current_state_ids=current_state_ids,
+ )
+
+ return EventContext.with_state(
+ state_group=state_group,
+ state_group_before_event=context.state_group_before_event,
+ current_state_ids=current_state_ids,
+ prev_state_ids=prev_state_ids,
+ prev_group=prev_group,
+ delta_ids=state_updates,
+ )
+
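What the new state group amounts to, sketched with plain dicts (keys and IDs are illustrative):

    # store_state_group persists only delta_ids relative to prev_group; the
    # materialised state is the previous state with the overrides applied.
    prev_group_state = {
        ("m.room.power_levels", ""): "$pl_old",
        ("m.room.join_rules", ""): "$jr",
    }
    state_updates = {("m.room.power_levels", ""): "$pl_resolved"}  # delta_ids

    current_state_ids = dict(prev_group_state)
    current_state_ids.update(state_updates)

    assert current_state_ids[("m.room.power_levels", "")] == "$pl_resolved"
    assert current_state_ids[("m.room.join_rules", "")] == "$jr"  # untouched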
+ async def _run_push_actions_and_persist_event(
+ self, event: EventBase, context: EventContext, backfilled: bool = False
+ ):
+ """Run the push actions for a received event, and persist it.
+
+ Args:
+ event: The event itself.
+ context: The event context.
+ backfilled: True if the event was backfilled.
+ """
+ try:
+ if (
+ not event.internal_metadata.is_outlier()
+ and not backfilled
+ and not context.rejected
+ and (await self._store.get_min_depth(event.room_id)) <= event.depth
+ ):
+ await self._action_generator.handle_push_actions_for_event(
+ event, context
+ )
+
+ await self.persist_events_and_notify(
+ event.room_id, [(event, context)], backfilled=backfilled
+ )
+ except Exception:
+ run_in_background(
+ self._store.remove_push_actions_from_staging, event.event_id
+ )
+ raise
+
+ async def persist_events_and_notify(
+ self,
+ room_id: str,
+ event_and_contexts: Sequence[Tuple[EventBase, EventContext]],
+ backfilled: bool = False,
+ ) -> int:
+ """Persists events and tells the notifier/pushers about them, if
+ necessary.
+
+ Args:
+ room_id: The room ID of events being persisted.
+ event_and_contexts: Sequence of events with their associated
+ context that should be persisted. All events must belong to
+ the same room.
+ backfilled: Whether these events are a result of
+ backfilling or not
+
+ Returns:
+ The stream ID after which all events have been persisted.
+ """
+ if not event_and_contexts:
+ return self._store.get_current_events_token()
+
+ instance = self._config.worker.events_shard_config.get_instance(room_id)
+ if instance != self._instance_name:
+ # Limit the number of events sent over replication. We choose 200
+ # here as that is what we default to in `max_request_body_size(..)`
+ for batch in batch_iter(event_and_contexts, 200):
+ result = await self._send_events(
+ instance_name=instance,
+ store=self._store,
+ room_id=room_id,
+ event_and_contexts=batch,
+ backfilled=backfilled,
+ )
+ return result["max_stream_id"]
+ else:
+ assert self._storage.persistence
+
+ # Note that this returns the events that were persisted, which may not be
+ # the same as were passed in if some were deduplicated due to transaction IDs.
+ events, max_stream_token = await self._storage.persistence.persist_events(
+ event_and_contexts, backfilled=backfilled
+ )
+
+ if self._ephemeral_messages_enabled:
+ for event in events:
+ # If there's an expiry timestamp on the event, schedule its expiry.
+ self._message_handler.maybe_schedule_expiry(event)
+
+ if not backfilled: # Never notify for backfilled events
+ for event in events:
+ await self._notify_persisted_event(event, max_stream_token)
+
+ return max_stream_token.stream
+
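The batching above keeps each replication request within the default request body limits; a minimal equivalent of the batch_iter helper used here:

    from itertools import islice
    from typing import Iterable, Iterator, Tuple, TypeVar

    T = TypeVar("T")

    def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
        # Yield fixed-size tuples until the input is exhausted.
        iterator = iter(iterable)
        while True:
            batch = tuple(islice(iterator, size))
            if not batch:
                return
            yield batch

    assert [len(b) for b in batch_iter(range(450), 200)] == [200, 200, 50]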
+ async def _notify_persisted_event(
+ self, event: EventBase, max_stream_token: RoomStreamToken
+ ) -> None:
+ """Checks to see if notifier/pushers should be notified about the
+ event or not.
+
+ Args:
+ event:
+ max_stream_token: The max_stream_id returned by persist_events
+ """
+
+ extra_users = []
+ if event.type == EventTypes.Member:
+ target_user_id = event.state_key
+
+ # We notify for memberships if it's an invite for one of our
+ # users
+ if event.internal_metadata.is_outlier():
+ if event.membership != Membership.INVITE:
+ if not self._is_mine_id(target_user_id):
+ return
+
+ target_user = UserID.from_string(target_user_id)
+ extra_users.append(target_user)
+ elif event.internal_metadata.is_outlier():
+ return
+
+ # the event has been persisted so it should have a stream ordering.
+ assert event.internal_metadata.stream_ordering
+
+ event_pos = PersistedEventPosition(
+ self._instance_name, event.internal_metadata.stream_ordering
+ )
+ self._notifier.on_new_room_event(
+ event, event_pos, max_stream_token, extra_users=extra_users
+ )
+
+ def _sanity_check_event(self, ev: EventBase) -> None:
+ """
+ Do some early sanity checks of a received event
+
+ In particular, checks it doesn't have an excessive number of
+ prev_events or auth_events, which could cause a huge state resolution
+ or cascade of event fetches.
+
+ Args:
+ ev: event to be checked
+
+ Raises:
+ SynapseError if the event does not pass muster
+ """
+ if len(ev.prev_event_ids()) > 20:
+ logger.warning(
+ "Rejecting event %s which has %i prev_events",
+ ev.event_id,
+ len(ev.prev_event_ids()),
+ )
+ raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many prev_events")
+
+ if len(ev.auth_event_ids()) > 10:
+ logger.warning(
+ "Rejecting event %s which has %i auth_events",
+ ev.event_id,
+ len(ev.auth_event_ids()),
+ )
+ raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events")
+
+ async def get_min_depth_for_context(self, context: str) -> int:
+ return await self._store.get_min_depth(context)
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 0961dec5ab..8ffeabacf9 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -824,6 +824,7 @@ class IdentityHandler(BaseHandler):
room_avatar_url: str,
room_join_rules: str,
room_name: str,
+ room_type: Optional[str],
inviter_display_name: str,
inviter_avatar_url: str,
id_access_token: Optional[str] = None,
@@ -843,6 +844,7 @@ class IdentityHandler(BaseHandler):
notifications.
room_join_rules: The join rules of the room (e.g. "public").
room_name: The m.room.name of the room.
+ room_type: The type of the room from its m.room.create event (e.g. "m.space").
inviter_display_name: The current display name of the
inviter.
inviter_avatar_url: The URL of the inviter's avatar.
@@ -869,6 +871,10 @@ class IdentityHandler(BaseHandler):
"sender_display_name": inviter_display_name,
"sender_avatar_url": inviter_avatar_url,
}
+
+ if room_type is not None:
+ invite_config["org.matrix.msc3288.room_type"] = room_type
+
# If a custom web client location is available, include it in the request.
if self._web_client_location:
invite_config["org.matrix.web_client_location"] = self._web_client_location
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index e1c544a3c9..4e8f7f1d85 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -151,7 +151,7 @@ class InitialSyncHandler(BaseHandler):
limit = 10
async def handle_room(event: RoomsForUser):
- d = {
+ d: JsonDict = {
"room_id": event.room_id,
"membership": event.membership,
"visibility": (
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 8a0024ce84..bf0fef1510 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -27,6 +27,7 @@ from synapse import event_auth
from synapse.api.constants import (
EventContentFields,
EventTypes,
+ GuestAccess,
Membership,
RelationTypes,
UserTypes,
@@ -183,20 +184,37 @@ class MessageHandler:
if not last_events:
raise NotFoundError("Can't find event for token %s" % (at_token,))
+ last_event = last_events[0]
+
+ # check whether the user is in the room at that time to determine
+ # whether they should be treated as peeking.
+ state_map = await self.state_store.get_state_for_event(
+ last_event.event_id,
+ StateFilter.from_types([(EventTypes.Member, user_id)]),
+ )
+
+ joined = False
+ membership_event = state_map.get((EventTypes.Member, user_id))
+ if membership_event:
+ joined = membership_event.membership == Membership.JOIN
+
+ is_peeking = not joined
visible_events = await filter_events_for_client(
self.storage,
user_id,
last_events,
filter_send_to_client=False,
+ is_peeking=is_peeking,
)
- event = last_events[0]
if visible_events:
room_state_events = await self.state_store.get_state_for_events(
- [event.event_id], state_filter=state_filter
+ [last_event.event_id], state_filter=state_filter
)
- room_state: Mapping[Any, EventBase] = room_state_events[event.event_id]
+ room_state: Mapping[Any, EventBase] = room_state_events[
+ last_event.event_id
+ ]
else:
raise AuthError(
403,
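The membership check added above reduces to a small predicate; a sketch (Membership.JOIN is the string "join"):

    from typing import Optional

    def is_peeking(membership_at_event: Optional[str]) -> bool:
        # Treat anyone who was not joined at that point in time as peeking,
        # so filter_events_for_client applies the stricter visibility rules.
        return membership_at_event != "join"

    assert is_peeking(None)      # never a member
    assert is_peeking("leave")   # had left by then
    assert not is_peeking("join")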
@@ -409,7 +427,7 @@ class EventCreationHandler:
self.send_event = ReplicationSendEventRestServlet.make_client(hs)
- # This is only used to get at ratelimit function, and maybe_kick_guest_users
+ # This is only used to get at the ratelimit function
self.base_handler = BaseHandler(hs)
# We arbitrarily limit concurrent event creation for a room to 5.
@@ -1289,7 +1307,7 @@ class EventCreationHandler:
requester, is_admin_redaction=is_admin_redaction
)
- await self.base_handler.maybe_kick_guest_users(event, context)
+ await self._maybe_kick_guest_users(event, context)
if event.type == EventTypes.CanonicalAlias:
# Validate a newly added alias or newly added alt_aliases.
@@ -1376,6 +1394,9 @@ class EventCreationHandler:
allow_none=True,
)
+ room_version = await self.store.get_room_version_id(event.room_id)
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+
# we can make some additional checks now if we have the original event.
if original_event:
if original_event.type == EventTypes.Create:
@@ -1387,6 +1408,28 @@ class EventCreationHandler:
if original_event.type == EventTypes.ServerACL:
raise AuthError(403, "Redacting server ACL events is not permitted")
+ # Add a little safety stop-gap to prevent people from trying to
+ # redact MSC2716-related events when they're in a room version
+ # which does not support it yet. We allow people to use MSC2716
+ # events in existing room versions, but only from the room
+ # creator, since that does not require any changes to the auth
+ # rules or, in effect, to the redaction algorithm. In the
+ # supported room version, we add the `historical` power level to
+ # auth the MSC2716-related events and adjust the redaction
+ # algorithm to keep the `historical` field around (redacting an
+ # event should only strip fields which don't affect the
+ # structural protocol level).
+ is_msc2716_event = (
+ original_event.type == EventTypes.MSC2716_INSERTION
+ or original_event.type == EventTypes.MSC2716_CHUNK
+ or original_event.type == EventTypes.MSC2716_MARKER
+ )
+ if not room_version_obj.msc2716_historical and is_msc2716_event:
+ raise AuthError(
+ 403,
+ "Redacting MSC2716 events is not supported in this room version",
+ )
+
prev_state_ids = await context.get_prev_state_ids()
auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=True
@@ -1394,9 +1437,6 @@ class EventCreationHandler:
auth_events_map = await self.store.get_events(auth_events_ids)
auth_events = {(e.type, e.state_key): e for e in auth_events_map.values()}
- room_version = await self.store.get_room_version_id(event.room_id)
- room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
-
if event_auth.check_redaction(
room_version_obj, event, auth_events=auth_events
):
@@ -1454,6 +1494,28 @@ class EventCreationHandler:
return event
+ async def _maybe_kick_guest_users(
+ self, event: EventBase, context: EventContext
+ ) -> None:
+ if event.type != EventTypes.GuestAccess:
+ return
+
+ guest_access = event.content.get(EventContentFields.GUEST_ACCESS)
+ if guest_access == GuestAccess.CAN_JOIN:
+ return
+
+ current_state_ids = await context.get_current_state_ids()
+
+ # since this is a client-generated event, it cannot be an outlier and we must
+ # therefore have the state ids.
+ assert current_state_ids is not None
+ current_state_dict = await self.store.get_events(
+ list(current_state_ids.values())
+ )
+ current_state = list(current_state_dict.values())
+ logger.info("maybe_kick_guest_users %r", current_state)
+ await self.hs.get_room_member_handler().kick_guest_users(current_state)
+
async def _bump_active_time(self, user: UserID) -> None:
try:
presence = self.hs.get_presence_handler()
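The redaction gate added in the hunk above, flattened into a standalone predicate (the MSC2716 type strings follow the org.matrix.msc2716.* namespace used elsewhere in this diff):

    MSC2716_EVENT_TYPES = {
        "org.matrix.msc2716.insertion",
        "org.matrix.msc2716.chunk",
        "org.matrix.msc2716.marker",
    }

    def redaction_permitted(original_event_type: str, msc2716_supported: bool) -> bool:
        # Redacting MSC2716 events is only allowed once the room version
        # understands the adjusted redaction algorithm.
        return msc2716_supported or original_event_type not in MSC2716_EVENT_TYPES

    assert not redaction_permitted("org.matrix.msc2716.insertion", False)
    assert redaction_permitted("m.room.message", False)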
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 016c5df2ca..4418d63df7 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -353,6 +353,11 @@ class BasePresenceHandler(abc.ABC):
# otherwise would not do).
await self.set_state(UserID.from_string(user_id), state, force_notify=True)
+ async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool:
+ raise NotImplementedError(
+ "Attempting to check presence on a non-presence worker."
+ )
+
class _NullContextManager(ContextManager[None]):
"""A context manager which does nothing."""
@@ -1184,8 +1189,7 @@ class PresenceHandler(BasePresenceHandler):
new_fields = {"state": presence}
if not ignore_status_msg:
- msg = status_msg if presence != PresenceState.OFFLINE else None
- new_fields["status_msg"] = msg
+ new_fields["status_msg"] = status_msg
if presence == PresenceState.ONLINE or (
presence == PresenceState.BUSY and self._busy_presence_enabled
@@ -1478,7 +1482,7 @@ def format_user_presence_state(
content["user_id"] = state.user_id
if state.last_active_ts:
content["last_active_ago"] = now - state.last_active_ts
- if state.status_msg and state.state != PresenceState.OFFLINE:
+ if state.status_msg:
content["status_msg"] = state.status_msg
if state.state == PresenceState.ONLINE:
content["currently_active"] = state.currently_active
@@ -1840,9 +1844,7 @@ def handle_timeout(
# don't set them as offline.
sync_or_active = max(state.last_user_sync_ts, state.last_active_ts)
if now - sync_or_active > SYNC_ONLINE_TIMEOUT:
- state = state.copy_and_replace(
- state=PresenceState.OFFLINE, status_msg=None
- )
+ state = state.copy_and_replace(state=PresenceState.OFFLINE)
changed = True
else:
# We expect to be poked occasionally by the other side.
@@ -1850,7 +1852,7 @@ def handle_timeout(
# no one gets stuck online forever.
if now - state.last_federation_update_ts > FEDERATION_TIMEOUT:
# The other side seems to have disappeared.
- state = state.copy_and_replace(state=PresenceState.OFFLINE, status_msg=None)
+ state = state.copy_and_replace(state=PresenceState.OFFLINE)
changed = True
return state if changed else None
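Net effect of the presence changes above: status_msg is no longer wiped when a user goes offline. Roughly:

    # Before: going offline cleared status_msg; after: it is preserved and
    # included in the formatted presence whenever it is set.
    state = {"presence": "offline", "status_msg": "on holiday"}

    content = {"presence": state["presence"]}
    if state["status_msg"]:
        content["status_msg"] = state["status_msg"]

    assert content == {"presence": "offline", "status_msg": "on holiday"}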
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index b9085bbccb..fb495229a7 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -70,7 +70,8 @@ class ReceiptsHandler(BaseHandler):
)
if not is_in_room:
logger.info(
- "Ignoring receipt from %s as we're not in the room",
+ "Ignoring receipt for room %r from server %s as we're not in the room",
+ room_id,
origin,
)
continue
@@ -187,7 +188,14 @@ class ReceiptEventSource:
new_users = {}
for rr_user_id, user_rr in m_read.items():
- hidden = user_rr.get("hidden", None)
+ try:
+ hidden = user_rr.get("hidden")
+ except AttributeError:
+ # Due to https://github.com/matrix-org/synapse/issues/10376
+ # there are cases where user_rr is a string; in those cases
+ # we just ignore the read receipt.
+ continue
+
if hidden is not True or rr_user_id == user_id:
new_users[rr_user_id] = user_rr.copy()
# If hidden has a value replace hidden with the correct prefixed key
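The MSC2285 filtering above, as a standalone predicate: a hidden receipt stays visible only to its own sender.

    def receipt_visible_to(user_rr: dict, rr_user_id: str, viewer: str) -> bool:
        return user_rr.get("hidden") is not True or rr_user_id == viewer

    assert receipt_visible_to({"hidden": True}, "@a:hs.example", "@a:hs.example")
    assert not receipt_visible_to({"hidden": True}, "@a:hs.example", "@b:hs.example")
    assert receipt_visible_to({}, "@a:hs.example", "@b:hs.example")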
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 8cf614136e..0ed59d757b 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -56,6 +56,22 @@ login_counter = Counter(
)
+def init_counters_for_auth_provider(auth_provider_id: str) -> None:
+ """Ensure the prometheus counters for the given auth provider are initialised
+
+ This fixes a problem where the counters are not reported for a given auth provider
+ until the user first logs in/registers.
+ """
+ for is_guest in (True, False):
+ login_counter.labels(guest=is_guest, auth_provider=auth_provider_id)
+ for shadow_banned in (True, False):
+ registration_counter.labels(
+ guest=is_guest,
+ shadow_banned=shadow_banned,
+ auth_provider=auth_provider_id,
+ )
+
+
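Why pre-touching the labels works: in prometheus_client, calling .labels(...) instantiates the child series at zero, so it is exported immediately instead of first appearing after a login. A minimal sketch (metric name is illustrative):

    from prometheus_client import Counter, generate_latest

    logins = Counter("example_logins", "Example login counter",
                     ["guest", "auth_provider"])

    # Instantiating the child series is enough; no .inc() needed.
    logins.labels(guest=False, auth_provider="oidc")

    exposition = generate_latest().decode()
    assert 'auth_provider="oidc"' in exposition  # exported with value 0.0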
class LoginDict(TypedDict):
device_id: str
access_token: str
@@ -96,6 +112,8 @@ class RegistrationHandler(BaseHandler):
self.session_lifetime = hs.config.session_lifetime
self.access_token_lifetime = hs.config.access_token_lifetime
+ init_counters_for_auth_provider("")
+
async def check_username(
self,
localpart: str,
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index b33fe09f77..0235fd09b4 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -25,7 +25,9 @@ from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, Tuple
from synapse.api.constants import (
+ EventContentFields,
EventTypes,
+ GuestAccess,
HistoryVisibility,
JoinRules,
Membership,
@@ -909,7 +911,12 @@ class RoomCreationHandler(BaseHandler):
)
return last_stream_id
- config = self._presets_dict[preset_config]
+ try:
+ config = self._presets_dict[preset_config]
+ except KeyError:
+ raise SynapseError(
+ 400, f"'{preset_config}' is not a valid preset", errcode=Codes.BAD_JSON
+ )
creation_content.update({"creator": creator_id})
await send(etype=EventTypes.Create, content=creation_content)
@@ -988,7 +995,8 @@ class RoomCreationHandler(BaseHandler):
if config["guest_can_join"]:
if (EventTypes.GuestAccess, "") not in initial_state:
last_sent_stream_id = await send(
- etype=EventTypes.GuestAccess, content={"guest_access": "can_join"}
+ etype=EventTypes.GuestAccess,
+ content={EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN},
)
for (etype, state_key), content in initial_state.items():
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index fae2c098e3..92bb75c848 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -19,7 +19,13 @@ from typing import TYPE_CHECKING, Optional, Tuple
import msgpack
from unpaddedbase64 import decode_base64, encode_base64
-from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules
+from synapse.api.constants import (
+ EventContentFields,
+ EventTypes,
+ GuestAccess,
+ HistoryVisibility,
+ JoinRules,
+)
from synapse.api.errors import (
Codes,
HttpResponseException,
@@ -336,8 +342,8 @@ class RoomListHandler(BaseHandler):
guest_event = current_state.get((EventTypes.GuestAccess, ""))
guest = None
if guest_event:
- guest = guest_event.content.get("guest_access", None)
- result["guest_can_join"] = guest == "can_join"
+ guest = guest_event.content.get(EventContentFields.GUEST_ACCESS)
+ result["guest_can_join"] = guest == GuestAccess.CAN_JOIN
avatar_event = current_state.get(("m.room.avatar", ""))
if avatar_event:
@@ -356,6 +362,12 @@ class RoomListHandler(BaseHandler):
include_all_networks: bool = False,
third_party_instance_id: Optional[str] = None,
) -> JsonDict:
+ """Get the public room list from remote server
+
+ Raises:
+ SynapseError
+ """
+
if not self.enable_room_list_search:
return {"chunk": [], "total_room_count_estimate": 0}
@@ -395,13 +407,16 @@ class RoomListHandler(BaseHandler):
limit = None
since_token = None
- res = await self._get_remote_list_cached(
- server_name,
- limit=limit,
- since_token=since_token,
- include_all_networks=include_all_networks,
- third_party_instance_id=third_party_instance_id,
- )
+ try:
+ res = await self._get_remote_list_cached(
+ server_name,
+ limit=limit,
+ since_token=since_token,
+ include_all_networks=include_all_networks,
+ third_party_instance_id=third_party_instance_id,
+ )
+ except (RequestSendFailed, HttpResponseException):
+ raise SynapseError(502, "Failed to fetch room list")
if search_filter:
res = {
@@ -423,20 +438,21 @@ class RoomListHandler(BaseHandler):
include_all_networks: bool = False,
third_party_instance_id: Optional[str] = None,
) -> JsonDict:
+ """Wrapper around FederationClient.get_public_rooms that caches the
+ result.
+ """
+
repl_layer = self.hs.get_federation_client()
if search_filter:
# We can't cache when asking for search
- try:
- return await repl_layer.get_public_rooms(
- server_name,
- limit=limit,
- since_token=since_token,
- search_filter=search_filter,
- include_all_networks=include_all_networks,
- third_party_instance_id=third_party_instance_id,
- )
- except (RequestSendFailed, HttpResponseException):
- raise SynapseError(502, "Failed to fetch room list")
+ return await repl_layer.get_public_rooms(
+ server_name,
+ limit=limit,
+ since_token=since_token,
+ search_filter=search_filter,
+ include_all_networks=include_all_networks,
+ third_party_instance_id=third_party_instance_id,
+ )
key = (
server_name,
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 65ad3efa6a..4390201641 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -19,7 +19,13 @@ from http import HTTPStatus
from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple
from synapse import types
-from synapse.api.constants import AccountDataTypes, EventTypes, Membership
+from synapse.api.constants import (
+ AccountDataTypes,
+ EventContentFields,
+ EventTypes,
+ GuestAccess,
+ Membership,
+)
from synapse.api.errors import (
AuthError,
Codes,
@@ -31,6 +37,7 @@ from synapse.api.ratelimiting import Ratelimiter
from synapse.event_auth import get_named_level, get_power_level_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
+from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.types import (
JsonDict,
Requester,
@@ -38,6 +45,7 @@ from synapse.types import (
RoomID,
StateMap,
UserID,
+ create_requester,
get_domain_from_id,
)
from synapse.util.async_helpers import Linearizer
@@ -64,6 +72,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.auth = hs.get_auth()
self.state_handler = hs.get_state_handler()
self.config = hs.config
+ self._server_name = hs.hostname
self.federation_handler = hs.get_federation_handler()
self.directory_handler = hs.get_directory_handler()
@@ -74,7 +83,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.account_data_handler = hs.get_account_data_handler()
self.event_auth_handler = hs.get_event_auth_handler()
- self.member_linearizer = Linearizer(name="member")
+ self.member_linearizer: Linearizer = Linearizer(name="member")
self.clock = hs.get_clock()
self.spam_checker = hs.get_spam_checker()
@@ -109,9 +118,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count,
)
- # This is only used to get at ratelimit function, and
- # maybe_kick_guest_users. It's fine there are multiple of these as
- # it doesn't store state.
+ # This is only used to get at the ratelimit function. It's fine that
+ # there are multiple of these, as it doesn't store state.
self.base_handler = BaseHandler(hs)
@abc.abstractmethod
@@ -551,6 +559,20 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
content.pop("displayname", None)
content.pop("avatar_url", None)
+ if len(content.get("displayname") or "") > MAX_DISPLAYNAME_LEN:
+ raise SynapseError(
+ 400,
+ f"Displayname is too long (max {MAX_DISPLAYNAME_LEN})",
+ errcode=Codes.BAD_JSON,
+ )
+
+ if len(content.get("avatar_url") or "") > MAX_AVATAR_URL_LEN:
+ raise SynapseError(
+ 400,
+ f"Avatar URL is too long (max {MAX_AVATAR_URL_LEN})",
+ errcode=Codes.BAD_JSON,
+ )
+
effective_membership_state = action
if action in ["kick", "unban"]:
effective_membership_state = "leave"
@@ -1075,10 +1097,62 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
return bool(
guest_access
and guest_access.content
- and "guest_access" in guest_access.content
- and guest_access.content["guest_access"] == "can_join"
+ and guest_access.content.get(EventContentFields.GUEST_ACCESS)
+ == GuestAccess.CAN_JOIN
)
+ async def kick_guest_users(self, current_state: Iterable[EventBase]) -> None:
+ """Kick any local guest users from the room.
+
+ This is called when the room state changes from guests allowed to not-allowed.
+
+ Args:
+ current_state: the current state of the room. We will iterate this to look
+ for guest users to kick.
+ """
+ for member_event in current_state:
+ try:
+ if member_event.type != EventTypes.Member:
+ continue
+
+ if not self.hs.is_mine_id(member_event.state_key):
+ continue
+
+ if member_event.content["membership"] not in {
+ Membership.JOIN,
+ Membership.INVITE,
+ }:
+ continue
+
+ if (
+ "kind" not in member_event.content
+ or member_event.content["kind"] != "guest"
+ ):
+ continue
+
+ # We make the user choose to leave, rather than have the
+ # event-sender kick them. This is partially because we don't
+ # need to worry about power levels, and partially because guest
+ # users are a concept which doesn't hugely work over federation,
+ # and having homeservers have their own users leave keeps more
+ # of that decision-making and control local to the guest-having
+ # homeserver.
+ target_user = UserID.from_string(member_event.state_key)
+ requester = create_requester(
+ target_user, is_guest=True, authenticated_entity=self._server_name
+ )
+ handler = self.hs.get_room_member_handler()
+ await handler.update_membership(
+ requester,
+ target_user,
+ member_event.room_id,
+ "leave",
+ ratelimit=False,
+ require_consent=False,
+ )
+ except Exception as e:
+ logger.exception("Error kicking guest user: %s" % (e,))
+
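The per-member filter inside the loop above, in isolation (a member event is modelled as a plain dict):

    def should_make_leave(member: dict, is_mine_id) -> bool:
        # Only local guest users who are currently joined or invited get a
        # synthetic "leave" sent on their behalf.
        return (
            member.get("type") == "m.room.member"
            and is_mine_id(member.get("state_key", ""))
            and member.get("content", {}).get("membership") in ("join", "invite")
            and member.get("content", {}).get("kind") == "guest"
        )

    event = {
        "type": "m.room.member",
        "state_key": "@guest:hs.example",
        "content": {"membership": "join", "kind": "guest"},
    }
    assert should_make_leave(event, lambda uid: uid.endswith(":hs.example"))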
async def lookup_room_alias(
self, room_alias: RoomAlias
) -> Tuple[RoomID, List[str]]:
@@ -1237,6 +1311,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if room_name_event:
room_name = room_name_event.content.get("name", "")
+ room_type = None
+ room_create_event = room_state.get((EventTypes.Create, ""))
+ if room_create_event:
+ room_type = room_create_event.content.get(EventContentFields.ROOM_TYPE)
+
room_join_rules = ""
join_rules_event = room_state.get((EventTypes.JoinRules, ""))
if join_rules_event:
@@ -1263,6 +1342,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
room_avatar_url=room_avatar_url,
room_join_rules=room_join_rules,
room_name=room_name,
+ room_type=room_type,
inviter_display_name=inviter_display_name,
inviter_avatar_url=inviter_avatar_url,
id_access_token=id_access_token,
@@ -1326,7 +1406,6 @@ class RoomMemberMasterHandler(RoomMemberHandler):
self.distributor = hs.get_distributor()
self.distributor.declare("user_left_room")
- self._server_name = hs.hostname
async def _is_remote_room_too_complex(
self, room_id: str, remote_room_hosts: List[str]
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
new file mode 100644
index 0000000000..781da9e811
--- /dev/null
+++ b/synapse/handlers/room_summary.py
@@ -0,0 +1,1182 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import itertools
+import logging
+import re
+from collections import deque
+from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Set, Tuple
+
+import attr
+
+from synapse.api.constants import (
+ EventContentFields,
+ EventTypes,
+ HistoryVisibility,
+ JoinRules,
+ Membership,
+ RoomTypes,
+)
+from synapse.api.errors import (
+ AuthError,
+ Codes,
+ NotFoundError,
+ StoreError,
+ SynapseError,
+ UnsupportedRoomVersionError,
+)
+from synapse.events import EventBase
+from synapse.types import JsonDict
+from synapse.util.caches.response_cache import ResponseCache
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+# number of rooms to return. We'll stop once we hit this limit.
+MAX_ROOMS = 50
+
+# max number of events to return per room.
+MAX_ROOMS_PER_SPACE = 50
+
+# max number of federation servers to hit per room
+MAX_SERVERS_PER_SPACE = 3
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _PaginationKey:
+ """The key used to find unique pagination session."""
+
+ # The first three entries match the request parameters (and cannot change
+ # during a pagination session).
+ room_id: str
+ suggested_only: bool
+ max_depth: Optional[int]
+ # The randomly generated token.
+ token: str
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _PaginationSession:
+ """The information that is stored for pagination."""
+
+ # The time the pagination session was created, in milliseconds.
+ creation_time_ms: int
+ # The queue of rooms which are still to process.
+ room_queue: List["_RoomQueueEntry"]
+ # A set of rooms which have been processed.
+ processed_rooms: Set[str]
+
+
+class RoomSummaryHandler:
+ # A unique key used for pagination sessions for the room hierarchy endpoint.
+ _PAGINATION_SESSION_TYPE = "room_hierarchy_pagination"
+
+ # The time a pagination session remains valid for.
+ _PAGINATION_SESSION_VALIDITY_PERIOD_MS = 5 * 60 * 1000
+
+ def __init__(self, hs: "HomeServer"):
+ self._event_auth_handler = hs.get_event_auth_handler()
+ self._store = hs.get_datastore()
+ self._event_serializer = hs.get_event_client_serializer()
+ self._server_name = hs.hostname
+ self._federation_client = hs.get_federation_client()
+
+ # If a user tries to fetch the same page multiple times in quick succession,
+ # only process the first attempt and return its result to subsequent requests.
+ self._pagination_response_cache: ResponseCache[
+ Tuple[str, bool, Optional[int], Optional[int], Optional[str]]
+ ] = ResponseCache(
+ hs.get_clock(),
+ "get_room_hierarchy",
+ )
+
+ async def get_space_summary(
+ self,
+ requester: str,
+ room_id: str,
+ suggested_only: bool = False,
+ max_rooms_per_space: Optional[int] = None,
+ ) -> JsonDict:
+ """
+ Implementation of the space summary C-S API
+
+ Args:
+ requester: user id of the user making this request
+
+ room_id: room id to start the summary at
+
+ suggested_only: whether we should only return children with the "suggested"
+ flag set.
+
+ max_rooms_per_space: an optional limit on the number of child rooms we will
+ return. This does not apply to the root room (ie, room_id), and
+ is capped by MAX_ROOMS_PER_SPACE.
+
+ Returns:
+ summary dict to return
+ """
+ # First of all, check that the room is accessible.
+ if not await self._is_local_room_accessible(room_id, requester):
+ raise AuthError(
+ 403,
+ "User %s not in room %s, and room previews are disabled"
+ % (requester, room_id),
+ )
+
+ # the queue of rooms to process
+ room_queue = deque((_RoomQueueEntry(room_id, ()),))
+
+ # rooms we have already processed
+ processed_rooms: Set[str] = set()
+
+ # events we have already processed. We don't necessarily have their event ids,
+ # so instead we key on (room id, state key)
+ processed_events: Set[Tuple[str, str]] = set()
+
+ rooms_result: List[JsonDict] = []
+ events_result: List[JsonDict] = []
+
+ while room_queue and len(rooms_result) < MAX_ROOMS:
+ queue_entry = room_queue.popleft()
+ room_id = queue_entry.room_id
+ if room_id in processed_rooms:
+ # already done this room
+ continue
+
+ logger.debug("Processing room %s", room_id)
+
+ is_in_room = await self._store.is_host_joined(room_id, self._server_name)
+
+ # The client-specified max_rooms_per_space limit doesn't apply to the
+ # room_id specified in the request, so we ignore it if this is the
+ # first room we are processing.
+ max_children = max_rooms_per_space if processed_rooms else None
+
+ if is_in_room:
+ room_entry = await self._summarize_local_room(
+ requester, None, room_id, suggested_only, max_children
+ )
+
+ events: Sequence[JsonDict] = []
+ if room_entry:
+ rooms_result.append(room_entry.room)
+ events = room_entry.children_state_events
+
+ logger.debug(
+ "Query of local room %s returned events %s",
+ room_id,
+ ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in events],
+ )
+ else:
+ fed_rooms = await self._summarize_remote_room(
+ queue_entry,
+ suggested_only,
+ max_children,
+ exclude_rooms=processed_rooms,
+ )
+
+ # The results over federation might include rooms that we, as the
+ # requesting server, are allowed to see, but the requesting user
+ # is not permitted to see.
+ #
+ # Filter the returned results to only what is accessible to the user.
+ events = []
+ for room_entry in fed_rooms:
+ room = room_entry.room
+ fed_room_id = room_entry.room_id
+
+ # The user can see the room, include it!
+ if await self._is_remote_room_accessible(
+ requester, fed_room_id, room
+ ):
+ # Before returning to the client, remove the allowed_room_ids
+ # and allowed_spaces keys.
+ room.pop("allowed_room_ids", None)
+ room.pop("allowed_spaces", None)
+
+ rooms_result.append(room)
+ events.extend(room_entry.children_state_events)
+
+ # All rooms returned don't need visiting again (even if the user
+ # didn't have access to them).
+ processed_rooms.add(fed_room_id)
+
+ logger.debug(
+ "Query of %s returned rooms %s, events %s",
+ room_id,
+ [room_entry.room.get("room_id") for room_entry in fed_rooms],
+ ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in events],
+ )
+
+ # the room we queried may or may not have been returned, but don't process
+ # it again, anyway.
+ processed_rooms.add(room_id)
+
+ # XXX: is it ok that we blindly iterate through any events returned by
+ # a remote server, whether or not they actually link to any rooms in our
+ # tree?
+ for ev in events:
+ # remote servers might return events we have already processed
+ # (eg, Dendrite returns inward pointers as well as outward ones), so
+ # we need to filter them out, to avoid returning duplicate links to the
+ # client.
+ ev_key = (ev["room_id"], ev["state_key"])
+ if ev_key in processed_events:
+ continue
+ events_result.append(ev)
+
+ # add the child to the queue. we have already validated
+ # that the vias are a list of server names.
+ room_queue.append(
+ _RoomQueueEntry(ev["state_key"], ev["content"]["via"])
+ )
+ processed_events.add(ev_key)
+
+ return {"rooms": rooms_result, "events": events_result}
+
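The traversal above is a breadth-first walk with de-duplication; its skeleton, with a toy child map standing in for the local and federation lookups:

    from collections import deque
    from typing import Dict, List

    def walk_space(root: str, children: Dict[str, List[str]], max_rooms: int = 50):
        room_queue = deque([root])
        processed, result = set(), []
        while room_queue and len(result) < max_rooms:
            room_id = room_queue.popleft()
            if room_id in processed:
                continue  # already done this room
            processed.add(room_id)
            result.append(room_id)
            room_queue.extend(children.get(room_id, []))
        return result

    tree = {"!space:hs": ["!a:hs", "!b:hs"], "!a:hs": ["!c:hs"]}
    assert walk_space("!space:hs", tree) == ["!space:hs", "!a:hs", "!b:hs", "!c:hs"]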
+ async def get_room_hierarchy(
+ self,
+ requester: str,
+ requested_room_id: str,
+ suggested_only: bool = False,
+ max_depth: Optional[int] = None,
+ limit: Optional[int] = None,
+ from_token: Optional[str] = None,
+ ) -> JsonDict:
+ """
+ Implementation of the room hierarchy C-S API.
+
+ Args:
+ requester: The user ID of the user making this request.
+ requested_room_id: The room ID to start the hierarchy at (the "root" room).
+ suggested_only: Whether we should only return children with the "suggested"
+ flag set.
+ max_depth: The maximum depth in the tree to explore, must be a
+ non-negative integer.
+
+ 0 would correspond to just the root room, 1 would include just
+ the root room's children, etc.
+ limit: An optional limit on the number of rooms to return per
+ page. Must be a positive integer.
+ from_token: An optional pagination token.
+
+ Returns:
+ The JSON hierarchy dictionary.
+ """
+ # If a user tries to fetch the same page multiple times in quick succession,
+ # only process the first attempt and return its result to subsequent requests.
+ #
+ # This is due to the pagination process mutating internal state, attempting
+ # to process multiple requests for the same page will result in errors.
+ return await self._pagination_response_cache.wrap(
+ (requested_room_id, suggested_only, max_depth, limit, from_token),
+ self._get_room_hierarchy,
+ requester,
+ requested_room_id,
+ suggested_only,
+ max_depth,
+ limit,
+ from_token,
+ )
+
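A toy stand-in for the ResponseCache behaviour relied on above: concurrent callers with the same key await a single in-flight computation (the real cache also expires entries):

    import asyncio
    from typing import Any, Callable, Dict

    _inflight: Dict[Any, asyncio.Future] = {}

    async def wrap(key: Any, func: Callable, *args: Any) -> Any:
        if key not in _inflight:
            _inflight[key] = asyncio.ensure_future(func(*args))
        return await _inflight[key]

    async def main() -> None:
        async def expensive(page: str) -> str:
            await asyncio.sleep(0.01)
            return f"result-for-{page}"

        # Both callers share one execution of `expensive`.
        a, b = await asyncio.gather(wrap("page1", expensive, "page1"),
                                    wrap("page1", expensive, "page1"))
        assert a == b == "result-for-page1"

    asyncio.run(main())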
+ async def _get_room_hierarchy(
+ self,
+ requester: str,
+ requested_room_id: str,
+ suggested_only: bool = False,
+ max_depth: Optional[int] = None,
+ limit: Optional[int] = None,
+ from_token: Optional[str] = None,
+ ) -> JsonDict:
+ """See docstring for SpaceSummaryHandler.get_room_hierarchy."""
+
+ # First of all, check that the room is accessible.
+ if not await self._is_local_room_accessible(requested_room_id, requester):
+ raise AuthError(
+ 403,
+ "User %s not in room %s, and room previews are disabled"
+ % (requester, requested_room_id),
+ )
+
+ # If this is continuing a previous session, pull the persisted data.
+ if from_token:
+ try:
+ pagination_session = await self._store.get_session(
+ session_type=self._PAGINATION_SESSION_TYPE,
+ session_id=from_token,
+ )
+ except StoreError:
+ raise SynapseError(400, "Unknown pagination token", Codes.INVALID_PARAM)
+
+ # If the requester, room ID, suggested-only, or max depth were modified
+ # the session is invalid.
+ if (
+ requester != pagination_session["requester"]
+ or requested_room_id != pagination_session["room_id"]
+ or suggested_only != pagination_session["suggested_only"]
+ or max_depth != pagination_session["max_depth"]
+ ):
+ raise SynapseError(400, "Unknown pagination token", Codes.INVALID_PARAM)
+
+ # Load the previous state.
+ room_queue = [
+ _RoomQueueEntry(*fields) for fields in pagination_session["room_queue"]
+ ]
+ processed_rooms = set(pagination_session["processed_rooms"])
+ else:
+ # The queue of rooms to process, the next room is last on the stack.
+ room_queue = [_RoomQueueEntry(requested_room_id, ())]
+
+ # Rooms we have already processed.
+ processed_rooms = set()
+
+ rooms_result: List[JsonDict] = []
+
+ # Cap the limit to a server-side maximum.
+ if limit is None:
+ limit = MAX_ROOMS
+ else:
+ limit = min(limit, MAX_ROOMS)
+
+ # Iterate through the queue until we reach the limit or run out of
+ # rooms to include.
+ while room_queue and len(rooms_result) < limit:
+ queue_entry = room_queue.pop()
+ room_id = queue_entry.room_id
+ current_depth = queue_entry.depth
+ if room_id in processed_rooms:
+ # already done this room
+ continue
+
+ logger.debug("Processing room %s", room_id)
+
+ # A map of summaries for children rooms that might be returned over
+ # federation. The rationale for caching these and *maybe* using them
+ # is to prefer any information local to the homeserver before trusting
+ # data received over federation.
+ children_room_entries: Dict[str, JsonDict] = {}
+ # A set of room IDs which are children that did not have information
+ # returned over federation and are known to be inaccessible to the
+ # current server. We should not reach out over federation to try to
+ # summarise these rooms.
+ inaccessible_children: Set[str] = set()
+
+ # If the room is known locally, summarise it!
+ is_in_room = await self._store.is_host_joined(room_id, self._server_name)
+ if is_in_room:
+ room_entry = await self._summarize_local_room(
+ requester,
+ None,
+ room_id,
+ suggested_only,
+ # TODO Handle max children.
+ max_children=None,
+ )
+
+ # Otherwise, attempt to use information for federation.
+ else:
+ # A previous call might have included information for this room.
+ # It can be used if either:
+ #
+ # 1. The room is not a space.
+ # 2. The maximum depth has been reached (since no children
+ # information is needed).
+ if queue_entry.remote_room and (
+ queue_entry.remote_room.get("room_type") != RoomTypes.SPACE
+ or (max_depth is not None and current_depth >= max_depth)
+ ):
+ room_entry = _RoomEntry(
+ queue_entry.room_id, queue_entry.remote_room
+ )
+
+ # If the above isn't true, attempt to fetch the room
+ # information over federation.
+ else:
+ (
+ room_entry,
+ children_room_entries,
+ inaccessible_children,
+ ) = await self._summarize_remote_room_hierarchy(
+ queue_entry,
+ suggested_only,
+ )
+
+ # Ensure this room is accessible to the requester (and not just
+ # the homeserver).
+ if room_entry and not await self._is_remote_room_accessible(
+ requester, queue_entry.room_id, room_entry.room
+ ):
+ room_entry = None
+
+ # This room has been processed and should be ignored if it appears
+ # elsewhere in the hierarchy.
+ processed_rooms.add(room_id)
+
+ # There may or may not be a room entry based on whether it is
+ # inaccessible to the requesting user.
+ if room_entry:
+ # Add the room (including the stripped m.space.child events).
+ rooms_result.append(room_entry.as_json())
+
+ # If this room is not at the max-depth, check if there are any
+ # children to process.
+ if max_depth is None or current_depth < max_depth:
+ # The children get added in reverse order so that the next
+ # room to process, according to the ordering, is the last
+ # item in the list.
+ room_queue.extend(
+ _RoomQueueEntry(
+ ev["state_key"],
+ ev["content"]["via"],
+ current_depth + 1,
+ children_room_entries.get(ev["state_key"]),
+ )
+ for ev in reversed(room_entry.children_state_events)
+ if ev["type"] == EventTypes.SpaceChild
+ and ev["state_key"] not in inaccessible_children
+ )
+
+ result: JsonDict = {"rooms": rooms_result}
+
+ # If there's additional data, generate a pagination token (and persist state).
+ if room_queue:
+ result["next_batch"] = await self._store.create_session(
+ session_type=self._PAGINATION_SESSION_TYPE,
+ value={
+ # Information which must be identical across pagination.
+ "requester": requester,
+ "room_id": requested_room_id,
+ "suggested_only": suggested_only,
+ "max_depth": max_depth,
+ # The stored state.
+ "room_queue": [
+ attr.astuple(room_entry) for room_entry in room_queue
+ ],
+ "processed_rooms": list(processed_rooms),
+ },
+ expiry_ms=self._PAGINATION_SESSION_VALIDITY_PERIOD_MS,
+ )
+
+ return result
+
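Unlike the space summary's breadth-first queue, the hierarchy walk above is depth-first: children are pushed in reverse so the ordered-first child is popped next. A sketch:

    from typing import Dict, List

    def walk_hierarchy(root: str, children: Dict[str, List[str]]) -> List[str]:
        room_queue = [root]  # a stack: the next room to process is last
        processed, result = set(), []
        while room_queue:
            room_id = room_queue.pop()
            if room_id in processed:
                continue
            processed.add(room_id)
            result.append(room_id)
            room_queue.extend(reversed(children.get(room_id, [])))
        return result

    tree = {"!s:hs": ["!a:hs", "!b:hs"], "!a:hs": ["!c:hs"]}
    # Depth-first: !a's subtree is fully visited before !b.
    assert walk_hierarchy("!s:hs", tree) == ["!s:hs", "!a:hs", "!c:hs", "!b:hs"]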
+ async def federation_space_summary(
+ self,
+ origin: str,
+ room_id: str,
+ suggested_only: bool,
+ max_rooms_per_space: Optional[int],
+ exclude_rooms: Iterable[str],
+ ) -> JsonDict:
+ """
+ Implementation of the space summary Federation API
+
+ Args:
+ origin: The server requesting the spaces summary.
+
+ room_id: room id to start the summary at
+
+ suggested_only: whether we should only return children with the "suggested"
+ flag set.
+
+ max_rooms_per_space: an optional limit on the number of child rooms we will
+ return. Unlike the C-S API, this applies to the root room (room_id).
+ It is clipped to MAX_ROOMS_PER_SPACE.
+
+ exclude_rooms: a list of rooms to skip over (presumably because the
+ calling server has already seen them).
+
+ Returns:
+ summary dict to return
+ """
+ # the queue of rooms to process
+ room_queue = deque((room_id,))
+
+ # the set of rooms that we should not walk further. Initialise it with the
+ # excluded-rooms list; we will add other rooms as we process them so that
+ # we do not loop.
+ processed_rooms: Set[str] = set(exclude_rooms)
+
+ rooms_result: List[JsonDict] = []
+ events_result: List[JsonDict] = []
+
+ while room_queue and len(rooms_result) < MAX_ROOMS:
+ room_id = room_queue.popleft()
+ if room_id in processed_rooms:
+ # already done this room
+ continue
+
+ room_entry = await self._summarize_local_room(
+ None, origin, room_id, suggested_only, max_rooms_per_space
+ )
+
+ processed_rooms.add(room_id)
+
+ if room_entry:
+ rooms_result.append(room_entry.room)
+ events_result.extend(room_entry.children_state_events)
+
+ # add any children to the queue
+ room_queue.extend(
+ edge_event["state_key"]
+ for edge_event in room_entry.children_state_events
+ )
+
+ return {"rooms": rooms_result, "events": events_result}
+
+ async def get_federation_hierarchy(
+ self,
+ origin: str,
+ requested_room_id: str,
+ suggested_only: bool,
+ ) -> JsonDict:
+ """
+ Implementation of the room hierarchy Federation API.
+
+ This is similar to get_room_hierarchy, but does not recurse into the space.
+ It also considers whether anyone on the server may be able to access the
+ room, as opposed to whether a specific user can.
+
+ Args:
+ origin: The server requesting the spaces summary.
+ requested_room_id: The room ID to start the hierarchy at (the "root" room).
+ suggested_only: whether we should only return children with the "suggested"
+ flag set.
+
+ Returns:
+ The JSON hierarchy dictionary.
+ """
+ root_room_entry = await self._summarize_local_room(
+ None, origin, requested_room_id, suggested_only, max_children=None
+ )
+ if root_room_entry is None:
+ # Room is inaccessible to the requesting server.
+ raise SynapseError(404, "Unknown room: %s" % (requested_room_id,))
+
+ children_rooms_result: List[JsonDict] = []
+ inaccessible_children: List[str] = []
+
+ # Iterate through each child and potentially add it, but not its children,
+ # to the response.
+ for child_room in root_room_entry.children_state_events:
+ room_id = child_room.get("state_key")
+ assert isinstance(room_id, str)
+ # If the room is unknown, skip it.
+ if not await self._store.is_host_joined(room_id, self._server_name):
+ continue
+
+ room_entry = await self._summarize_local_room(
+ None, origin, room_id, suggested_only, max_children=0
+ )
+ # If the room is accessible, include it in the results.
+ #
+ # Note that only the room summary (without information on children)
+ # is included in the summary.
+ if room_entry:
+ children_rooms_result.append(room_entry.room)
+ # Otherwise, note that the requesting server shouldn't bother
+ # trying to summarize this room - they do not have access to it.
+ else:
+ inaccessible_children.append(room_id)
+
+ return {
+ # Include the requested room (including the stripped children events).
+ "room": root_room_entry.as_json(),
+ "children": children_rooms_result,
+ "inaccessible_children": inaccessible_children,
+ }
+
+ async def _summarize_local_room(
+ self,
+ requester: Optional[str],
+ origin: Optional[str],
+ room_id: str,
+ suggested_only: bool,
+ max_children: Optional[int],
+ ) -> Optional["_RoomEntry"]:
+ """
+ Generate a room entry and a list of event entries for a given room.
+
+ Args:
+ requester:
+ The user requesting the summary, if it is a local request. None
+ if this is a federation request.
+ origin:
+ The server requesting the summary, if it is a federation request.
+ None if this is a local request.
+ room_id: The room ID to summarize.
+ suggested_only: True if only suggested children should be returned.
+ Otherwise, all children are returned.
+ max_children:
+ The maximum number of children rooms to include. This is capped
+ to a server-set limit.
+
+ Returns:
+ A room entry if the room should be returned. None, otherwise.
+ """
+ if not await self._is_local_room_accessible(room_id, requester, origin):
+ return None
+
+ room_entry = await self._build_room_entry(room_id, for_federation=bool(origin))
+
+ # If the room is not a space or the children don't matter, return just
+ # the room information.
+ if room_entry.get("room_type") != RoomTypes.SPACE or max_children == 0:
+ return _RoomEntry(room_id, room_entry)
+
+ # Otherwise, look for child rooms/spaces.
+ child_events = await self._get_child_events(room_id)
+
+ if suggested_only:
+ # we only care about suggested children
+ child_events = filter(_is_suggested_child_event, child_events)
+
+ if max_children is None or max_children > MAX_ROOMS_PER_SPACE:
+ max_children = MAX_ROOMS_PER_SPACE
+
+ stripped_events: List[JsonDict] = [
+ {
+ "type": e.type,
+ "state_key": e.state_key,
+ "content": e.content,
+ "room_id": e.room_id,
+ "sender": e.sender,
+ "origin_server_ts": e.origin_server_ts,
+ }
+ for e in itertools.islice(child_events, max_children)
+ ]
+ return _RoomEntry(room_id, room_entry, stripped_events)
+
+ async def _summarize_remote_room(
+ self,
+ room: "_RoomQueueEntry",
+ suggested_only: bool,
+ max_children: Optional[int],
+ exclude_rooms: Iterable[str],
+ ) -> Iterable["_RoomEntry"]:
+ """
+ Request room entries and a list of event entries for a given room by querying a remote server.
+
+ Args:
+ room: The room to summarize.
+ suggested_only: True if only suggested children should be returned.
+ Otherwise, all children are returned.
+ max_children:
+ The maximum number of children rooms to include. This is capped
+ to a server-set limit.
+ exclude_rooms:
+ Room IDs which do not need to be summarized.
+
+ Returns:
+ An iterable of room entries.
+ """
+ room_id = room.room_id
+ logger.info("Requesting summary for %s via %s", room_id, room.via)
+
+ # we need to make the exclusion list json-serialisable
+ exclude_rooms = list(exclude_rooms)
+
+ via = itertools.islice(room.via, MAX_SERVERS_PER_SPACE)
+ try:
+ res = await self._federation_client.get_space_summary(
+ via,
+ room_id,
+ suggested_only=suggested_only,
+ max_rooms_per_space=max_children,
+ exclude_rooms=exclude_rooms,
+ )
+ except Exception as e:
+ logger.warning(
+ "Unable to get summary of %s via federation: %s",
+ room_id,
+ e,
+ exc_info=logger.isEnabledFor(logging.DEBUG),
+ )
+ return ()
+
+ # Group the events by their room.
+ children_by_room: Dict[str, List[JsonDict]] = {}
+ for ev in res.events:
+ if ev.event_type == EventTypes.SpaceChild:
+ children_by_room.setdefault(ev.room_id, []).append(ev.data)
+
+ # Generate the final results.
+ results = []
+ for fed_room in res.rooms:
+ fed_room_id = fed_room.get("room_id")
+ if not fed_room_id or not isinstance(fed_room_id, str):
+ continue
+
+ results.append(
+ _RoomEntry(
+ fed_room_id,
+ fed_room,
+ children_by_room.get(fed_room_id, []),
+ )
+ )
+
+ return results
+
+ async def _summarize_remote_room_hierarchy(
+ self, room: "_RoomQueueEntry", suggested_only: bool
+ ) -> Tuple[Optional["_RoomEntry"], Dict[str, JsonDict], Set[str]]:
+ """
+ Request room entries and a list of event entries for a given room by querying a remote server.
+
+ Args:
+ room: The room to summarize.
+ suggested_only: True if only suggested children should be returned.
+ Otherwise, all children are returned.
+
+ Returns:
+ A tuple of:
+ The room entry.
+ Partial room data returned over federation.
+ A set of inaccessible children room IDs.
+ """
+ room_id = room.room_id
+ logger.info("Requesting summary for %s via %s", room_id, room.via)
+
+ via = itertools.islice(room.via, MAX_SERVERS_PER_SPACE)
+ try:
+ (
+ room_response,
+ children,
+ inaccessible_children,
+ ) = await self._federation_client.get_room_hierarchy(
+ via,
+ room_id,
+ suggested_only=suggested_only,
+ )
+ except Exception as e:
+ logger.warning(
+ "Unable to get hierarchy of %s via federation: %s",
+ room_id,
+ e,
+ exc_info=logger.isEnabledFor(logging.DEBUG),
+ )
+ return None, {}, set()
+
+ # Map the children to their room ID.
+ children_by_room_id = {
+ c["room_id"]: c
+ for c in children
+ if "room_id" in c and isinstance(c["room_id"], str)
+ }
+
+ return (
+ _RoomEntry(room_id, room_response, room_response.pop("children_state", ())),
+ children_by_room_id,
+ set(inaccessible_children),
+ )
+
+ async def _is_local_room_accessible(
+ self, room_id: str, requester: Optional[str], origin: Optional[str] = None
+ ) -> bool:
+ """
+ Calculate whether the room should be shown to the requester.
+
+ It should return true if:
+
+ * The requester is joined or can join the room (per MSC3173).
+ * The origin server has any user that is joined or can join the room.
+ * The history visibility is set to world readable.
+
+ Args:
+ room_id: The room ID to check accessibility of.
+ requester:
+ The user making the request, if it is a local request.
+ None if this is a federation request.
+ origin:
+ The server making the request, if it is a federation request.
+ None if this is a local request.
+
+ Returns:
+ True if the room is accessible to the requesting user or server.
+ """
+ state_ids = await self._store.get_current_state_ids(room_id)
+
+ # If there's no state for the room, it isn't known.
+ if not state_ids:
+ # The user might have a pending invite for the room.
+ if requester and await self._store.get_invite_for_local_user_in_room(
+ requester, room_id
+ ):
+ return True
+
+ logger.info("room %s is unknown, omitting from summary", room_id)
+ return False
+
+ try:
+ room_version = await self._store.get_room_version(room_id)
+ except UnsupportedRoomVersionError:
+ # If a room with an unsupported room version is encountered, ignore
+ # it to avoid breaking the entire summary response.
+ return False
+
+ # Include the room if it has join rules of public or knock.
+ join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""))
+ if join_rules_event_id:
+ join_rules_event = await self._store.get_event(join_rules_event_id)
+ join_rule = join_rules_event.content.get("join_rule")
+ if join_rule == JoinRules.PUBLIC or (
+ room_version.msc2403_knocking and join_rule == JoinRules.KNOCK
+ ):
+ return True
+
+ # Include the room if it is peekable.
+ hist_vis_event_id = state_ids.get((EventTypes.RoomHistoryVisibility, ""))
+ if hist_vis_event_id:
+ hist_vis_ev = await self._store.get_event(hist_vis_event_id)
+ hist_vis = hist_vis_ev.content.get("history_visibility")
+ if hist_vis == HistoryVisibility.WORLD_READABLE:
+ return True
+
+ # Otherwise we need to check information specific to the user or server.
+
+ # If we have an authenticated requesting user, check if they are a member
+ # of the room (or can join the room).
+ if requester:
+ member_event_id = state_ids.get((EventTypes.Member, requester), None)
+
+ # If they're in the room they can see info on it.
+ if member_event_id:
+ member_event = await self._store.get_event(member_event_id)
+ if member_event.membership in (Membership.JOIN, Membership.INVITE):
+ return True
+
+ # Otherwise, check if they should be allowed access via membership in a space.
+ if await self._event_auth_handler.has_restricted_join_rules(
+ state_ids, room_version
+ ):
+ allowed_rooms = (
+ await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
+ )
+ if await self._event_auth_handler.is_user_in_rooms(
+ allowed_rooms, requester
+ ):
+ return True
+
+ # If this is a request over federation, check if the host is in the room or
+ # has a user who could join the room.
+ elif origin:
+ if await self._event_auth_handler.check_host_in_room(
+ room_id, origin
+ ) or await self._store.is_host_invited(room_id, origin):
+ return True
+
+ # Alternately, if the host has a user in any of the spaces specified
+ # for access, then the host can see this room (and should do filtering
+ # if the requester cannot see it).
+ if await self._event_auth_handler.has_restricted_join_rules(
+ state_ids, room_version
+ ):
+ allowed_rooms = (
+ await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
+ )
+ for space_id in allowed_rooms:
+ if await self._event_auth_handler.check_host_in_room(
+ space_id, origin
+ ):
+ return True
+
+ logger.info(
+ "room %s is unpeekable and requester %s is not a member / not allowed to join, omitting from summary",
+ room_id,
+ requester or origin,
+ )
+ return False
+
+ async def _is_remote_room_accessible(
+ self, requester: str, room_id: str, room: JsonDict
+ ) -> bool:
+ """
+ Calculate whether the room received over federation should be shown to the requester.
+
+ It should return true if:
+
+ * The requester is joined or can join the room (per MSC3173).
+ * The history visibility is set to world readable.
+
+ Note that the local server is not in the requested room (which is why the
+ remote call was made in the first place), but the user could have access
+ due to an invite, etc.
+
+ Args:
+ requester: The user requesting the summary.
+ room_id: The room ID returned over federation.
+ room: The summary of the room returned over federation.
+
+ Returns:
+ True if the room is accessible to the requesting user.
+ """
+ # The API doesn't return the room version so assume that a
+ # join rule of knock is valid.
+ if (
+ room.get("join_rules") in (JoinRules.PUBLIC, JoinRules.KNOCK)
+ or room.get("world_readable") is True
+ ):
+ return True
+
+ # Check if the user is a member of any of the allowed spaces
+ # from the response.
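+        # ("allowed_spaces" is the older form of the key, kept for backwards
+        # compatibility.)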
+ allowed_rooms = room.get("allowed_room_ids") or room.get("allowed_spaces")
+ if allowed_rooms and isinstance(allowed_rooms, list):
+ if await self._event_auth_handler.is_user_in_rooms(
+ allowed_rooms, requester
+ ):
+ return True
+
+ # Finally, check locally if we can access the room. The user might
+ # already be in the room (if it was a child room), or there might be a
+ # pending invite, etc.
+ return await self._is_local_room_accessible(room_id, requester)
+
+ async def _build_room_entry(self, room_id: str, for_federation: bool) -> JsonDict:
+ """
+        Generate an entry summarising a single room.
+
+ Args:
+ room_id: The room ID to summarize.
+ for_federation: True if this is a summary requested over federation
+ (which includes additional fields).
+
+ Returns:
+ The JSON dictionary for the room.
+ """
+ stats = await self._store.get_room_with_stats(room_id)
+
+ # currently this should be impossible because we call
+ # _is_local_room_accessible on the room before we get here, so
+ # there should always be an entry
+ assert stats is not None, "unable to retrieve stats for %s" % (room_id,)
+
+ current_state_ids = await self._store.get_current_state_ids(room_id)
+ create_event = await self._store.get_event(
+ current_state_ids[(EventTypes.Create, "")]
+ )
+
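+        # Build the room entry from the room's stats and its m.room.create event.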
+ entry = {
+ "room_id": stats["room_id"],
+ "name": stats["name"],
+ "topic": stats["topic"],
+ "canonical_alias": stats["canonical_alias"],
+ "num_joined_members": stats["joined_members"],
+ "avatar_url": stats["avatar"],
+ "join_rules": stats["join_rules"],
+ "world_readable": (
+ stats["history_visibility"] == HistoryVisibility.WORLD_READABLE
+ ),
+ "guest_can_join": stats["guest_access"] == "can_join",
+ "creation_ts": create_event.origin_server_ts,
+ "room_type": create_event.content.get(EventContentFields.ROOM_TYPE),
+ }
+
+ # Federation requests need to provide additional information so the
+ # requested server is able to filter the response appropriately.
+ if for_federation:
+ room_version = await self._store.get_room_version(room_id)
+ if await self._event_auth_handler.has_restricted_join_rules(
+ current_state_ids, room_version
+ ):
+ allowed_rooms = (
+ await self._event_auth_handler.get_rooms_that_allow_join(
+ current_state_ids
+ )
+ )
+ if allowed_rooms:
+ entry["allowed_room_ids"] = allowed_rooms
+ # TODO Remove this key once the API is stable.
+ entry["allowed_spaces"] = allowed_rooms
+
+        # Filter out Nones: omit the field altogether rather than include a null.
+ room_entry = {k: v for k, v in entry.items() if v is not None}
+
+ return room_entry
+
+ async def _get_child_events(self, room_id: str) -> Iterable[EventBase]:
+ """
+ Get the child events for a given room.
+
+ The returned results are sorted for stability.
+
+ Args:
+ room_id: The room id to get the children of.
+
+ Returns:
+ An iterable of sorted child events.
+ """
+
+ # look for child rooms/spaces.
+ current_state_ids = await self._store.get_current_state_ids(room_id)
+
+ events = await self._store.get_events_as_list(
+ [
+ event_id
+ for key, event_id in current_state_ids.items()
+ if key[0] == EventTypes.SpaceChild
+ ]
+ )
+
+ # filter out any events without a "via" (which implies it has been redacted),
+ # and order to ensure we return stable results.
+ return sorted(filter(_has_valid_via, events), key=_child_events_comparison_key)
+
+ async def get_room_summary(
+ self,
+ requester: Optional[str],
+ room_id: str,
+ remote_room_hosts: Optional[List[str]] = None,
+ ) -> JsonDict:
+ """
+ Implementation of the room summary C-S API from MSC3266
+
+ Args:
+            requester: user id of the user making this request; will be None
+                for unauthenticated requests
+
+ room_id: room id to summarise.
+
+ remote_room_hosts: a list of homeservers to try fetching data through
+ if we don't know it ourselves
+
+ Returns:
+ summary dict to return
+ """
+ is_in_room = await self._store.is_host_joined(room_id, self._server_name)
+
+ if is_in_room:
+ room_entry = await self._summarize_local_room(
+ requester,
+ None,
+ room_id,
+ # Suggested-only doesn't matter since no children are requested.
+ suggested_only=False,
+ max_children=0,
+ )
+
+ if not room_entry:
+ raise NotFoundError("Room not found or is not accessible")
+
+ room_summary = room_entry.room
+
+ # If there was a requester, add their membership.
+ if requester:
+ (
+ membership,
+ _,
+ ) = await self._store.get_local_current_membership_for_user_in_room(
+ requester, room_id
+ )
+
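+            # A missing membership event is reported as "leave".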
+ room_summary["membership"] = membership or "leave"
+ else:
+            # TODO: federation API, descoped from the initial unstable implementation
+            # as the MSC needs more maturing on that side.
+ raise SynapseError(400, "Federation is not currently supported.")
+
+ return room_summary
+
+
+@attr.s(frozen=True, slots=True, auto_attribs=True)
+class _RoomQueueEntry:
+ # The room ID of this entry.
+ room_id: str
+    # The servers to query if the room is not known locally.
+ via: Sequence[str]
+ # The minimum number of hops necessary to get to this room (compared to the
+ # originally requested room).
+ depth: int = 0
+ # The room summary for this room returned via federation. This will only be
+ # used if the room is not known locally (and is not a space).
+ remote_room: Optional[JsonDict] = None
+
+
+@attr.s(frozen=True, slots=True, auto_attribs=True)
+class _RoomEntry:
+ room_id: str
+ # The room summary for this room.
+ room: JsonDict
+ # An iterable of the sorted, stripped children events for children of this room.
+ #
+ # This may not include all children.
+ children_state_events: Sequence[JsonDict] = ()
+
+ def as_json(self) -> JsonDict:
+ """
+ Returns a JSON dictionary suitable for the room hierarchy endpoint.
+
+ It returns the room summary including the stripped m.space.child events
+ as a sub-key.
+ """
+ result = dict(self.room)
+ result["children_state"] = self.children_state_events
+ return result
+
+
+def _has_valid_via(e: EventBase) -> bool:
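+    # A valid child event must list the servers ("via") through which the room
+    # can be joined; a missing or malformed list implies it has been redacted.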
+ via = e.content.get("via")
+ if not via or not isinstance(via, Sequence):
+ return False
+ for v in via:
+ if not isinstance(v, str):
+ logger.debug("Ignoring edge event %s with invalid via entry", e.event_id)
+ return False
+ return True
+
+
+def _is_suggested_child_event(edge_event: EventBase) -> bool:
+ suggested = edge_event.content.get("suggested")
+ if isinstance(suggested, bool) and suggested:
+ return True
+ logger.debug("Ignorning not-suggested child %s", edge_event.state_key)
+ return False
+
+
+# Order may only contain characters in the range of \x20 (space) to \x7E (~) inclusive.
+_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7E]")
+
+
+def _child_events_comparison_key(
+ child: EventBase,
+) -> Tuple[bool, Optional[str], int, str]:
+ """
+ Generate a value for comparing two child events for ordering.
+
+ The rules for ordering are:
+
+ 1. The 'order' key, if it is valid.
+ 2. The 'origin_server_ts' of the 'm.space.child' event.
+ 3. The 'room_id'.
+
+ Args:
+ child: The event for generating a comparison key.
+
+ Returns:
+ The comparison key as a tuple of:
+ False if the ordering is valid.
+ The 'order' field or None if it is not given or invalid.
+ The 'origin_server_ts' field.
+ The room ID.
+ """
+ order = child.content.get("order")
+ # If order is not a string or doesn't meet the requirements, ignore it.
+ if not isinstance(order, str):
+ order = None
+ elif len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order):
+ order = None
+
+ # Items without an order come last.
+ return (order is None, order, child.origin_server_ts, child.room_id)
diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py
index e9f6aef06f..a31fe3e3c7 100644
--- a/synapse/handlers/send_email.py
+++ b/synapse/handlers/send_email.py
@@ -16,7 +16,15 @@ import email.utils
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
-from typing import TYPE_CHECKING
+from io import BytesIO
+from typing import TYPE_CHECKING, Optional
+
+from pkg_resources import parse_version
+
+import twisted
+from twisted.internet.defer import Deferred
+from twisted.internet.interfaces import IOpenSSLContextFactory, IReactorTCP
+from twisted.mail.smtp import ESMTPSender, ESMTPSenderFactory
from synapse.logging.context import make_deferred_yieldable
@@ -25,20 +33,102 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+_is_old_twisted = parse_version(twisted.__version__) < parse_version("21")
+
+
+class _NoTLSESMTPSender(ESMTPSender):
+ """Extend ESMTPSender to disable TLS
+
+ Unfortunately, before Twisted 21.2, ESMTPSender doesn't give an easy way to disable
+ TLS, so we override its internal method which it uses to generate a context factory.
+ """
+
+ def _getContextFactory(self) -> Optional[IOpenSSLContextFactory]:
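+        # Returning None means no TLS context factory is created, leaving the
+        # connection unencrypted.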
+ return None
+
+
+async def _sendmail(
+ reactor: IReactorTCP,
+ smtphost: str,
+ smtpport: int,
+ from_addr: str,
+ to_addr: str,
+ msg_bytes: bytes,
+ username: Optional[bytes] = None,
+ password: Optional[bytes] = None,
+ require_auth: bool = False,
+ require_tls: bool = False,
+ enable_tls: bool = True,
+) -> None:
+ """A simple wrapper around ESMTPSenderFactory, to allow substitution in tests
+
+    Args:
+ reactor: reactor to use to make the outbound connection
+ smtphost: hostname to connect to
+ smtpport: port to connect to
+ from_addr: "From" address for email
+ to_addr: "To" address for email
+ msg_bytes: Message content
+ username: username to authenticate with, if auth is enabled
+ password: password to give when authenticating
+ require_auth: if auth is not offered, fail the request
+        require_tls: if TLS is not offered, fail the request
+ enable_tls: True to enable TLS. If this is False and require_tls is True,
+ the request will fail.
+ """
+ msg = BytesIO(msg_bytes)
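+    # This deferred is handed to ESMTPSenderFactory, which fires it once the
+    # message has been sent (or errbacks it on failure).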
+ d: "Deferred[object]" = Deferred()
+
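+    # Build the factory lazily so the keyword arguments can vary with the
+    # installed Twisted version (see below).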
+ def build_sender_factory(**kwargs) -> ESMTPSenderFactory:
+ return ESMTPSenderFactory(
+ username,
+ password,
+ from_addr,
+ to_addr,
+ msg,
+ d,
+ heloFallback=True,
+ requireAuthentication=require_auth,
+ requireTransportSecurity=require_tls,
+ **kwargs,
+ )
+
+ if _is_old_twisted:
+ # before twisted 21.2, we have to override the ESMTPSender protocol to disable
+ # TLS
+ factory = build_sender_factory()
+
+ if not enable_tls:
+ factory.protocol = _NoTLSESMTPSender
+ else:
+ # for twisted 21.2 and later, there is a 'hostname' parameter which we should
+ # set to enable TLS.
+ factory = build_sender_factory(hostname=smtphost if enable_tls else None)
+
+ # the IReactorTCP interface claims host has to be a bytes, which seems to be wrong
+ reactor.connectTCP(smtphost, smtpport, factory, timeout=30, bindAddress=None) # type: ignore[arg-type]
+
+ await make_deferred_yieldable(d)
+
class SendEmailHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
- self._sendmail = hs.get_sendmail()
self._reactor = hs.get_reactor()
self._from = hs.config.email.email_notif_from
self._smtp_host = hs.config.email.email_smtp_host
self._smtp_port = hs.config.email.email_smtp_port
- self._smtp_user = hs.config.email.email_smtp_user
- self._smtp_pass = hs.config.email.email_smtp_pass
+
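+        # The SMTP library expects the credentials as bytes, so encode them (if set).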
+ user = hs.config.email.email_smtp_user
+ self._smtp_user = user.encode("utf-8") if user is not None else None
+ passwd = hs.config.email.email_smtp_pass
+ self._smtp_pass = passwd.encode("utf-8") if passwd is not None else None
self._require_transport_security = hs.config.email.require_transport_security
+ self._enable_tls = hs.config.email.enable_smtp_tls
+
+ self._sendmail = _sendmail
async def send_email(
self,
@@ -82,17 +172,16 @@ class SendEmailHandler:
logger.info("Sending email to %s" % email_address)
- await make_deferred_yieldable(
- self._sendmail(
- self._smtp_host,
- raw_from,
- raw_to,
- multipart_msg.as_string().encode("utf8"),
- reactor=self._reactor,
- port=self._smtp_port,
- requireAuthentication=self._smtp_user is not None,
- username=self._smtp_user,
- password=self._smtp_pass,
- requireTransportSecurity=self._require_transport_security,
- )
+ await self._sendmail(
+ self._reactor,
+ self._smtp_host,
+ self._smtp_port,
+ raw_from,
+ raw_to,
+ multipart_msg.as_string().encode("utf8"),
+ username=self._smtp_user,
+ password=self._smtp_pass,
+ require_auth=self._smtp_user is not None,
+ require_tls=self._require_transport_security,
+ enable_tls=self._enable_tls,
)
diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py
deleted file mode 100644
index 5f7d4602bd..0000000000
--- a/synapse/handlers/space_summary.py
+++ /dev/null
@@ -1,667 +0,0 @@
-# Copyright 2021 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import itertools
-import logging
-import re
-from collections import deque
-from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Set, Tuple
-
-import attr
-
-from synapse.api.constants import (
- EventContentFields,
- EventTypes,
- HistoryVisibility,
- JoinRules,
- Membership,
- RoomTypes,
-)
-from synapse.events import EventBase
-from synapse.events.utils import format_event_for_client_v2
-from synapse.types import JsonDict
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-# number of rooms to return. We'll stop once we hit this limit.
-MAX_ROOMS = 50
-
-# max number of events to return per room.
-MAX_ROOMS_PER_SPACE = 50
-
-# max number of federation servers to hit per room
-MAX_SERVERS_PER_SPACE = 3
-
-
-class SpaceSummaryHandler:
- def __init__(self, hs: "HomeServer"):
- self._clock = hs.get_clock()
- self._auth = hs.get_auth()
- self._event_auth_handler = hs.get_event_auth_handler()
- self._store = hs.get_datastore()
- self._event_serializer = hs.get_event_client_serializer()
- self._server_name = hs.hostname
- self._federation_client = hs.get_federation_client()
-
- async def get_space_summary(
- self,
- requester: str,
- room_id: str,
- suggested_only: bool = False,
- max_rooms_per_space: Optional[int] = None,
- ) -> JsonDict:
- """
- Implementation of the space summary C-S API
-
- Args:
- requester: user id of the user making this request
-
- room_id: room id to start the summary at
-
- suggested_only: whether we should only return children with the "suggested"
- flag set.
-
- max_rooms_per_space: an optional limit on the number of child rooms we will
- return. This does not apply to the root room (ie, room_id), and
- is overridden by MAX_ROOMS_PER_SPACE.
-
- Returns:
- summary dict to return
- """
- # first of all, check that the user is in the room in question (or it's
- # world-readable)
- await self._auth.check_user_in_room_or_world_readable(room_id, requester)
-
- # the queue of rooms to process
- room_queue = deque((_RoomQueueEntry(room_id, ()),))
-
- # rooms we have already processed
- processed_rooms: Set[str] = set()
-
- # events we have already processed. We don't necessarily have their event ids,
- # so instead we key on (room id, state key)
- processed_events: Set[Tuple[str, str]] = set()
-
- rooms_result: List[JsonDict] = []
- events_result: List[JsonDict] = []
-
- while room_queue and len(rooms_result) < MAX_ROOMS:
- queue_entry = room_queue.popleft()
- room_id = queue_entry.room_id
- if room_id in processed_rooms:
- # already done this room
- continue
-
- logger.debug("Processing room %s", room_id)
-
- is_in_room = await self._store.is_host_joined(room_id, self._server_name)
-
- # The client-specified max_rooms_per_space limit doesn't apply to the
- # room_id specified in the request, so we ignore it if this is the
- # first room we are processing.
- max_children = max_rooms_per_space if processed_rooms else None
-
- if is_in_room:
- room, events = await self._summarize_local_room(
- requester, None, room_id, suggested_only, max_children
- )
-
- logger.debug(
- "Query of local room %s returned events %s",
- room_id,
- ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in events],
- )
-
- if room:
- rooms_result.append(room)
- else:
- fed_rooms, fed_events = await self._summarize_remote_room(
- queue_entry,
- suggested_only,
- max_children,
- exclude_rooms=processed_rooms,
- )
-
- # The results over federation might include rooms that the we,
- # as the requesting server, are allowed to see, but the requesting
- # user is not permitted see.
- #
- # Filter the returned results to only what is accessible to the user.
- room_ids = set()
- events = []
- for room in fed_rooms:
- fed_room_id = room.get("room_id")
- if not fed_room_id or not isinstance(fed_room_id, str):
- continue
-
- # The room should only be included in the summary if:
- # a. the user is in the room;
- # b. the room is world readable; or
- # c. the user could join the room, e.g. the join rules
- # are set to public or the user is in a space that
- # has been granted access to the room.
- #
- # Note that we know the user is not in the root room (which is
- # why the remote call was made in the first place), but the user
- # could be in one of the children rooms and we just didn't know
- # about the link.
-
- # The API doesn't return the room version so assume that a
- # join rule of knock is valid.
- include_room = (
- room.get("join_rules") in (JoinRules.PUBLIC, JoinRules.KNOCK)
- or room.get("world_readable") is True
- )
-
- # Check if the user is a member of any of the allowed spaces
- # from the response.
- allowed_rooms = room.get("allowed_spaces")
- if (
- not include_room
- and allowed_rooms
- and isinstance(allowed_rooms, list)
- ):
- include_room = await self._event_auth_handler.is_user_in_rooms(
- allowed_rooms, requester
- )
-
- # Finally, if this isn't the requested room, check ourselves
- # if we can access the room.
- if not include_room and fed_room_id != queue_entry.room_id:
- include_room = await self._is_room_accessible(
- fed_room_id, requester, None
- )
-
- # The user can see the room, include it!
- if include_room:
- rooms_result.append(room)
- room_ids.add(fed_room_id)
-
- # All rooms returned don't need visiting again (even if the user
- # didn't have access to them).
- processed_rooms.add(fed_room_id)
-
- for event in fed_events:
- if event.get("room_id") in room_ids:
- events.append(event)
-
- logger.debug(
- "Query of %s returned rooms %s, events %s",
- room_id,
- [room.get("room_id") for room in fed_rooms],
- ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in fed_events],
- )
-
- # the room we queried may or may not have been returned, but don't process
- # it again, anyway.
- processed_rooms.add(room_id)
-
- # XXX: is it ok that we blindly iterate through any events returned by
- # a remote server, whether or not they actually link to any rooms in our
- # tree?
- for ev in events:
- # remote servers might return events we have already processed
- # (eg, Dendrite returns inward pointers as well as outward ones), so
- # we need to filter them out, to avoid returning duplicate links to the
- # client.
- ev_key = (ev["room_id"], ev["state_key"])
- if ev_key in processed_events:
- continue
- events_result.append(ev)
-
- # add the child to the queue. we have already validated
- # that the vias are a list of server names.
- room_queue.append(
- _RoomQueueEntry(ev["state_key"], ev["content"]["via"])
- )
- processed_events.add(ev_key)
-
- # Before returning to the client, remove the allowed_spaces key for any
- # rooms.
- for room in rooms_result:
- room.pop("allowed_spaces", None)
-
- return {"rooms": rooms_result, "events": events_result}
-
- async def federation_space_summary(
- self,
- origin: str,
- room_id: str,
- suggested_only: bool,
- max_rooms_per_space: Optional[int],
- exclude_rooms: Iterable[str],
- ) -> JsonDict:
- """
- Implementation of the space summary Federation API
-
- Args:
- origin: The server requesting the spaces summary.
-
- room_id: room id to start the summary at
-
- suggested_only: whether we should only return children with the "suggested"
- flag set.
-
- max_rooms_per_space: an optional limit on the number of child rooms we will
- return. Unlike the C-S API, this applies to the root room (room_id).
- It is clipped to MAX_ROOMS_PER_SPACE.
-
- exclude_rooms: a list of rooms to skip over (presumably because the
- calling server has already seen them).
-
- Returns:
- summary dict to return
- """
- # the queue of rooms to process
- room_queue = deque((room_id,))
-
- # the set of rooms that we should not walk further. Initialise it with the
- # excluded-rooms list; we will add other rooms as we process them so that
- # we do not loop.
- processed_rooms: Set[str] = set(exclude_rooms)
-
- rooms_result: List[JsonDict] = []
- events_result: List[JsonDict] = []
-
- while room_queue and len(rooms_result) < MAX_ROOMS:
- room_id = room_queue.popleft()
- if room_id in processed_rooms:
- # already done this room
- continue
-
- logger.debug("Processing room %s", room_id)
-
- room, events = await self._summarize_local_room(
- None, origin, room_id, suggested_only, max_rooms_per_space
- )
-
- processed_rooms.add(room_id)
-
- if room:
- rooms_result.append(room)
- events_result.extend(events)
-
- # add any children to the queue
- room_queue.extend(edge_event["state_key"] for edge_event in events)
-
- return {"rooms": rooms_result, "events": events_result}
-
- async def _summarize_local_room(
- self,
- requester: Optional[str],
- origin: Optional[str],
- room_id: str,
- suggested_only: bool,
- max_children: Optional[int],
- ) -> Tuple[Optional[JsonDict], Sequence[JsonDict]]:
- """
- Generate a room entry and a list of event entries for a given room.
-
- Args:
- requester:
- The user requesting the summary, if it is a local request. None
- if this is a federation request.
- origin:
- The server requesting the summary, if it is a federation request.
- None if this is a local request.
- room_id: The room ID to summarize.
- suggested_only: True if only suggested children should be returned.
- Otherwise, all children are returned.
- max_children:
- The maximum number of children rooms to include. This is capped
- to a server-set limit.
-
- Returns:
- A tuple of:
- The room information, if the room should be returned to the
- user. None, otherwise.
-
- An iterable of the sorted children events. This may be limited
- to a maximum size or may include all children.
- """
- if not await self._is_room_accessible(room_id, requester, origin):
- return None, ()
-
- room_entry = await self._build_room_entry(room_id)
-
- # If the room is not a space, return just the room information.
- if room_entry.get("room_type") != RoomTypes.SPACE:
- return room_entry, ()
-
- # Otherwise, look for child rooms/spaces.
- child_events = await self._get_child_events(room_id)
-
- if suggested_only:
- # we only care about suggested children
- child_events = filter(_is_suggested_child_event, child_events)
-
- if max_children is None or max_children > MAX_ROOMS_PER_SPACE:
- max_children = MAX_ROOMS_PER_SPACE
-
- now = self._clock.time_msec()
- events_result: List[JsonDict] = []
- for edge_event in itertools.islice(child_events, max_children):
- events_result.append(
- await self._event_serializer.serialize_event(
- edge_event,
- time_now=now,
- event_format=format_event_for_client_v2,
- )
- )
-
- return room_entry, events_result
-
- async def _summarize_remote_room(
- self,
- room: "_RoomQueueEntry",
- suggested_only: bool,
- max_children: Optional[int],
- exclude_rooms: Iterable[str],
- ) -> Tuple[Sequence[JsonDict], Sequence[JsonDict]]:
- """
- Request room entries and a list of event entries for a given room by querying a remote server.
-
- Args:
- room: The room to summarize.
- suggested_only: True if only suggested children should be returned.
- Otherwise, all children are returned.
- max_children:
- The maximum number of children rooms to include. This is capped
- to a server-set limit.
- exclude_rooms:
- Rooms IDs which do not need to be summarized.
-
- Returns:
- A tuple of:
- An iterable of rooms.
-
- An iterable of the sorted children events. This may be limited
- to a maximum size or may include all children.
- """
- room_id = room.room_id
- logger.info("Requesting summary for %s via %s", room_id, room.via)
-
- # we need to make the exclusion list json-serialisable
- exclude_rooms = list(exclude_rooms)
-
- via = itertools.islice(room.via, MAX_SERVERS_PER_SPACE)
- try:
- res = await self._federation_client.get_space_summary(
- via,
- room_id,
- suggested_only=suggested_only,
- max_rooms_per_space=max_children,
- exclude_rooms=exclude_rooms,
- )
- except Exception as e:
- logger.warning(
- "Unable to get summary of %s via federation: %s",
- room_id,
- e,
- exc_info=logger.isEnabledFor(logging.DEBUG),
- )
- return (), ()
-
- return res.rooms, tuple(
- ev.data for ev in res.events if ev.event_type == EventTypes.SpaceChild
- )
-
- async def _is_room_accessible(
- self, room_id: str, requester: Optional[str], origin: Optional[str]
- ) -> bool:
- """
- Calculate whether the room should be shown in the spaces summary.
-
- It should be included if:
-
- * The requester is joined or can join the room (per MSC3173).
- * The origin server has any user that is joined or can join the room.
- * The history visibility is set to world readable.
-
- Args:
- room_id: The room ID to summarize.
- requester:
- The user requesting the summary, if it is a local request. None
- if this is a federation request.
- origin:
- The server requesting the summary, if it is a federation request.
- None if this is a local request.
-
- Returns:
- True if the room should be included in the spaces summary.
- """
- state_ids = await self._store.get_current_state_ids(room_id)
-
- # If there's no state for the room, it isn't known.
- if not state_ids:
- # The user might have a pending invite for the room.
- if requester and await self._store.get_invite_for_local_user_in_room(
- requester, room_id
- ):
- return True
-
- logger.info("room %s is unknown, omitting from summary", room_id)
- return False
-
- room_version = await self._store.get_room_version(room_id)
-
- # Include the room if it has join rules of public or knock.
- join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""))
- if join_rules_event_id:
- join_rules_event = await self._store.get_event(join_rules_event_id)
- join_rule = join_rules_event.content.get("join_rule")
- if join_rule == JoinRules.PUBLIC or (
- room_version.msc2403_knocking and join_rule == JoinRules.KNOCK
- ):
- return True
-
- # Include the room if it is peekable.
- hist_vis_event_id = state_ids.get((EventTypes.RoomHistoryVisibility, ""))
- if hist_vis_event_id:
- hist_vis_ev = await self._store.get_event(hist_vis_event_id)
- hist_vis = hist_vis_ev.content.get("history_visibility")
- if hist_vis == HistoryVisibility.WORLD_READABLE:
- return True
-
- # Otherwise we need to check information specific to the user or server.
-
- # If we have an authenticated requesting user, check if they are a member
- # of the room (or can join the room).
- if requester:
- member_event_id = state_ids.get((EventTypes.Member, requester), None)
-
- # If they're in the room they can see info on it.
- if member_event_id:
- member_event = await self._store.get_event(member_event_id)
- if member_event.membership in (Membership.JOIN, Membership.INVITE):
- return True
-
- # Otherwise, check if they should be allowed access via membership in a space.
- if await self._event_auth_handler.has_restricted_join_rules(
- state_ids, room_version
- ):
- allowed_rooms = (
- await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
- )
- if await self._event_auth_handler.is_user_in_rooms(
- allowed_rooms, requester
- ):
- return True
-
- # If this is a request over federation, check if the host is in the room or
- # has a user who could join the room.
- elif origin:
- if await self._event_auth_handler.check_host_in_room(
- room_id, origin
- ) or await self._store.is_host_invited(room_id, origin):
- return True
-
- # Alternately, if the host has a user in any of the spaces specified
- # for access, then the host can see this room (and should do filtering
- # if the requester cannot see it).
- if await self._event_auth_handler.has_restricted_join_rules(
- state_ids, room_version
- ):
- allowed_rooms = (
- await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
- )
- for space_id in allowed_rooms:
- if await self._event_auth_handler.check_host_in_room(
- space_id, origin
- ):
- return True
-
- logger.info(
- "room %s is unpeekable and requester %s is not a member / not allowed to join, omitting from summary",
- room_id,
- requester or origin,
- )
- return False
-
- async def _build_room_entry(self, room_id: str) -> JsonDict:
- """Generate en entry suitable for the 'rooms' list in the summary response"""
- stats = await self._store.get_room_with_stats(room_id)
-
- # currently this should be impossible because we call
- # check_user_in_room_or_world_readable on the room before we get here, so
- # there should always be an entry
- assert stats is not None, "unable to retrieve stats for %s" % (room_id,)
-
- current_state_ids = await self._store.get_current_state_ids(room_id)
- create_event = await self._store.get_event(
- current_state_ids[(EventTypes.Create, "")]
- )
-
- room_version = await self._store.get_room_version(room_id)
- allowed_rooms = None
- if await self._event_auth_handler.has_restricted_join_rules(
- current_state_ids, room_version
- ):
- allowed_rooms = await self._event_auth_handler.get_rooms_that_allow_join(
- current_state_ids
- )
-
- entry = {
- "room_id": stats["room_id"],
- "name": stats["name"],
- "topic": stats["topic"],
- "canonical_alias": stats["canonical_alias"],
- "num_joined_members": stats["joined_members"],
- "avatar_url": stats["avatar"],
- "join_rules": stats["join_rules"],
- "world_readable": (
- stats["history_visibility"] == HistoryVisibility.WORLD_READABLE
- ),
- "guest_can_join": stats["guest_access"] == "can_join",
- "creation_ts": create_event.origin_server_ts,
- "room_type": create_event.content.get(EventContentFields.ROOM_TYPE),
- "allowed_spaces": allowed_rooms,
- }
-
- # Filter out Nones – rather omit the field altogether
- room_entry = {k: v for k, v in entry.items() if v is not None}
-
- return room_entry
-
- async def _get_child_events(self, room_id: str) -> Iterable[EventBase]:
- """
- Get the child events for a given room.
-
- The returned results are sorted for stability.
-
- Args:
- room_id: The room id to get the children of.
-
- Returns:
- An iterable of sorted child events.
- """
-
- # look for child rooms/spaces.
- current_state_ids = await self._store.get_current_state_ids(room_id)
-
- events = await self._store.get_events_as_list(
- [
- event_id
- for key, event_id in current_state_ids.items()
- if key[0] == EventTypes.SpaceChild
- ]
- )
-
- # filter out any events without a "via" (which implies it has been redacted),
- # and order to ensure we return stable results.
- return sorted(filter(_has_valid_via, events), key=_child_events_comparison_key)
-
-
-@attr.s(frozen=True, slots=True)
-class _RoomQueueEntry:
- room_id = attr.ib(type=str)
- via = attr.ib(type=Sequence[str])
-
-
-def _has_valid_via(e: EventBase) -> bool:
- via = e.content.get("via")
- if not via or not isinstance(via, Sequence):
- return False
- for v in via:
- if not isinstance(v, str):
- logger.debug("Ignoring edge event %s with invalid via entry", e.event_id)
- return False
- return True
-
-
-def _is_suggested_child_event(edge_event: EventBase) -> bool:
- suggested = edge_event.content.get("suggested")
- if isinstance(suggested, bool) and suggested:
- return True
- logger.debug("Ignorning not-suggested child %s", edge_event.state_key)
- return False
-
-
-# Order may only contain characters in the range of \x20 (space) to \x7E (~) inclusive.
-_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7E]")
-
-
-def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]:
- """
- Generate a value for comparing two child events for ordering.
-
- The rules for ordering are supposed to be:
-
- 1. The 'order' key, if it is valid.
- 2. The 'origin_server_ts' of the 'm.room.create' event.
- 3. The 'room_id'.
-
- But we skip step 2 since we may not have any state from the room.
-
- Args:
- child: The event for generating a comparison key.
-
- Returns:
- The comparison key as a tuple of:
- False if the ordering is valid.
- The ordering field.
- The room ID.
- """
- order = child.content.get("order")
- # If order is not a string or doesn't meet the requirements, ignore it.
- if not isinstance(order, str):
- order = None
- elif len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order):
- order = None
-
- # Items without an order come last.
- return (order is None, order, child.room_id)
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index 1b855a685c..0e6ebb574e 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -37,6 +37,7 @@ from twisted.web.server import Request
from synapse.api.constants import LoginType
from synapse.api.errors import Codes, NotFoundError, RedirectException, SynapseError
from synapse.config.sso import SsoAttributeRequirement
+from synapse.handlers.register import init_counters_for_auth_provider
from synapse.handlers.ui_auth import UIAuthSessionDataConstants
from synapse.http import get_request_user_agent
from synapse.http.server import respond_with_html, respond_with_redirect
@@ -213,6 +214,7 @@ class SsoHandler:
p_id = p.idp_id
assert p_id not in self._identity_providers
self._identity_providers[p_id] = p
+ init_counters_for_auth_provider(p_id)
def get_identity_providers(self) -> Mapping[str, SsoIdentityProvider]:
"""Get the configured identity providers"""
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 3fd89af2a4..3a4c41c9ff 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -18,7 +18,7 @@ from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Tuple
from typing_extensions import Counter as CounterType
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.metrics import event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import JsonDict
@@ -273,7 +273,9 @@ class StatsHandler:
elif typ == EventTypes.CanonicalAlias:
room_state["canonical_alias"] = event_content.get("alias")
elif typ == EventTypes.GuestAccess:
- room_state["guest_access"] = event_content.get("guest_access")
+ room_state["guest_access"] = event_content.get(
+ EventContentFields.GUEST_ACCESS
+ )
for room_id, state in room_to_state_updates.items():
logger.debug("Updating room_stats_state for %s: %s", room_id, state)
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index f30bfcc93c..e017b28cd2 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1,5 +1,4 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-# Copyright 2018, 2019 New Vector Ltd
+# Copyright 2015-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -31,6 +30,8 @@ from prometheus_client import Counter
from synapse.api.constants import AccountDataTypes, EventTypes, Membership
from synapse.api.filtering import FilterCollection
+from synapse.api.presence import UserPresenceState
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.logging.context import current_context
from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span
@@ -86,20 +87,20 @@ LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100
SyncRequestKey = Tuple[Any, ...]
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class SyncConfig:
- user = attr.ib(type=UserID)
- filter_collection = attr.ib(type=FilterCollection)
- is_guest = attr.ib(type=bool)
- request_key = attr.ib(type=SyncRequestKey)
- device_id = attr.ib(type=Optional[str])
+ user: UserID
+ filter_collection: FilterCollection
+ is_guest: bool
+ request_key: SyncRequestKey
+ device_id: Optional[str]
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class TimelineBatch:
- prev_batch = attr.ib(type=StreamToken)
- events = attr.ib(type=List[EventBase])
- limited = attr.ib(type=bool)
+ prev_batch: StreamToken
+ events: List[EventBase]
+ limited: bool
def __bool__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
@@ -113,16 +114,16 @@ class TimelineBatch:
# if there are updates for it, which we check after the instance has been created.
# This should not be a big deal because we update the notification counts afterwards as
# well anyway.
-@attr.s(slots=True)
+@attr.s(slots=True, auto_attribs=True)
class JoinedSyncResult:
- room_id = attr.ib(type=str)
- timeline = attr.ib(type=TimelineBatch)
- state = attr.ib(type=StateMap[EventBase])
- ephemeral = attr.ib(type=List[JsonDict])
- account_data = attr.ib(type=List[JsonDict])
- unread_notifications = attr.ib(type=JsonDict)
- summary = attr.ib(type=Optional[JsonDict])
- unread_count = attr.ib(type=int)
+ room_id: str
+ timeline: TimelineBatch
+ state: StateMap[EventBase]
+ ephemeral: List[JsonDict]
+ account_data: List[JsonDict]
+ unread_notifications: JsonDict
+ summary: Optional[JsonDict]
+ unread_count: int
def __bool__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
@@ -138,12 +139,12 @@ class JoinedSyncResult:
)
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class ArchivedSyncResult:
- room_id = attr.ib(type=str)
- timeline = attr.ib(type=TimelineBatch)
- state = attr.ib(type=StateMap[EventBase])
- account_data = attr.ib(type=List[JsonDict])
+ room_id: str
+ timeline: TimelineBatch
+ state: StateMap[EventBase]
+ account_data: List[JsonDict]
def __bool__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
@@ -152,37 +153,37 @@ class ArchivedSyncResult:
return bool(self.timeline or self.state or self.account_data)
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class InvitedSyncResult:
- room_id = attr.ib(type=str)
- invite = attr.ib(type=EventBase)
+ room_id: str
+ invite: EventBase
def __bool__(self) -> bool:
"""Invited rooms should always be reported to the client"""
return True
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class KnockedSyncResult:
- room_id = attr.ib(type=str)
- knock = attr.ib(type=EventBase)
+ room_id: str
+ knock: EventBase
def __bool__(self) -> bool:
"""Knocked rooms should always be reported to the client"""
return True
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class GroupsSyncResult:
- join = attr.ib(type=JsonDict)
- invite = attr.ib(type=JsonDict)
- leave = attr.ib(type=JsonDict)
+ join: JsonDict
+ invite: JsonDict
+ leave: JsonDict
def __bool__(self) -> bool:
return bool(self.join or self.invite or self.leave)
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class DeviceLists:
"""
Attributes:
@@ -190,27 +191,27 @@ class DeviceLists:
left: List of user_ids whose devices we no longer track
"""
- changed = attr.ib(type=Collection[str])
- left = attr.ib(type=Collection[str])
+ changed: Collection[str]
+ left: Collection[str]
def __bool__(self) -> bool:
return bool(self.changed or self.left)
-@attr.s(slots=True)
+@attr.s(slots=True, auto_attribs=True)
class _RoomChanges:
"""The set of room entries to include in the sync, plus the set of joined
and left room IDs since last sync.
"""
- room_entries = attr.ib(type=List["RoomSyncResultBuilder"])
- invited = attr.ib(type=List[InvitedSyncResult])
- knocked = attr.ib(type=List[KnockedSyncResult])
- newly_joined_rooms = attr.ib(type=List[str])
- newly_left_rooms = attr.ib(type=List[str])
+ room_entries: List["RoomSyncResultBuilder"]
+ invited: List[InvitedSyncResult]
+ knocked: List[KnockedSyncResult]
+ newly_joined_rooms: List[str]
+ newly_left_rooms: List[str]
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class SyncResult:
"""
Attributes:
@@ -230,18 +231,18 @@ class SyncResult:
groups: Group updates, if any
"""
- next_batch = attr.ib(type=StreamToken)
- presence = attr.ib(type=List[JsonDict])
- account_data = attr.ib(type=List[JsonDict])
- joined = attr.ib(type=List[JoinedSyncResult])
- invited = attr.ib(type=List[InvitedSyncResult])
- knocked = attr.ib(type=List[KnockedSyncResult])
- archived = attr.ib(type=List[ArchivedSyncResult])
- to_device = attr.ib(type=List[JsonDict])
- device_lists = attr.ib(type=DeviceLists)
- device_one_time_keys_count = attr.ib(type=JsonDict)
- device_unused_fallback_key_types = attr.ib(type=List[str])
- groups = attr.ib(type=Optional[GroupsSyncResult])
+ next_batch: StreamToken
+ presence: List[UserPresenceState]
+ account_data: List[JsonDict]
+ joined: List[JoinedSyncResult]
+ invited: List[InvitedSyncResult]
+ knocked: List[KnockedSyncResult]
+ archived: List[ArchivedSyncResult]
+ to_device: List[JsonDict]
+ device_lists: DeviceLists
+ device_one_time_keys_count: JsonDict
+ device_unused_fallback_key_types: List[str]
+ groups: Optional[GroupsSyncResult]
def __bool__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
@@ -269,14 +270,22 @@ class SyncHandler:
self.presence_handler = hs.get_presence_handler()
self.event_sources = hs.get_event_sources()
self.clock = hs.get_clock()
- self.response_cache: ResponseCache[SyncRequestKey] = ResponseCache(
- hs.get_clock(), "sync"
- )
self.state = hs.get_state_handler()
self.auth = hs.get_auth()
self.storage = hs.get_storage()
self.state_store = self.storage.state
+ # TODO: flush cache entries on subsequent sync request.
+ # Once we get the next /sync request (ie, one with the same access token
+ # that sets 'since' to 'next_batch'), we know that device won't need a
+ # cached result any more, and we could flush the entry from the cache to save
+ # memory.
+ self.response_cache: ResponseCache[SyncRequestKey] = ResponseCache(
+ hs.get_clock(),
+ "sync",
+ timeout_ms=hs.config.caches.sync_response_cache_duration,
+ )
+
# ExpiringCache((User, Device)) -> LruCache(user_id => event_id)
self.lazy_loaded_members_cache: ExpiringCache[
Tuple[str, Optional[str]], LruCache[str, str]
@@ -496,10 +505,13 @@ class SyncHandler:
else:
limited = False
+ log_kv({"limited": limited})
+
if potential_recents:
recents = sync_config.filter_collection.filter_room_timeline(
potential_recents
)
+ log_kv({"recents_after_sync_filtering": len(recents)})
# We check if there are any state events, if there are then we pass
# all current state events to the filter_events function. This is to
@@ -517,6 +529,7 @@ class SyncHandler:
recents,
always_include_ids=current_state_ids,
)
+ log_kv({"recents_after_visibility_filtering": len(recents)})
else:
recents = []
@@ -557,10 +570,15 @@ class SyncHandler:
events, end_key = await self.store.get_recent_events_for_room(
room_id, limit=load_limit + 1, end_token=end_key
)
+
+ log_kv({"loaded_recents": len(events)})
+
loaded_recents = sync_config.filter_collection.filter_room_timeline(
events
)
+ log_kv({"loaded_recents_after_sync_filtering": len(loaded_recents)})
+
# We check if there are any state events, if there are then we pass
# all current state events to the filter_events function. This is to
# ensure that we always include current state in the timeline
@@ -577,6 +595,9 @@ class SyncHandler:
loaded_recents,
always_include_ids=current_state_ids,
)
+
+ log_kv({"loaded_recents_after_client_filtering": len(loaded_recents)})
+
loaded_recents.extend(recents)
recents = loaded_recents
@@ -693,7 +714,7 @@ class SyncHandler:
name_id = state_ids.get((EventTypes.Name, ""))
canonical_alias_id = state_ids.get((EventTypes.CanonicalAlias, ""))
- summary = {}
+ summary: JsonDict = {}
empty_ms = MemberSummary([], 0)
# TODO: only send these when they change.
@@ -1107,6 +1128,8 @@ class SyncHandler:
logger.debug("Fetching group data")
await self._generate_sync_entry_for_groups(sync_result_builder)
+ num_events = 0
+
# debug for https://github.com/matrix-org/synapse/issues/4422
for joined_room in sync_result_builder.joined:
room_id = joined_room.room_id
@@ -1114,6 +1137,14 @@ class SyncHandler:
issue4422_logger.debug(
"Sync result for newly joined room %s: %r", room_id, joined_room
)
+ num_events += len(joined_room.timeline.events)
+
+ log_kv(
+ {
+ "joined_rooms_in_result": len(sync_result_builder.joined),
+ "events_in_result": num_events,
+ }
+ )
logger.debug("Sync response calculation complete")
return SyncResult(
@@ -1458,6 +1489,7 @@ class SyncHandler:
if not sync_result_builder.full_state:
if since_token and not ephemeral_by_room and not account_data_by_room:
have_changed = await self._have_rooms_changed(sync_result_builder)
+ log_kv({"rooms_have_changed": have_changed})
if not have_changed:
tags_by_room = await self.store.get_updated_tags(
user_id, since_token.account_data_key
@@ -1492,25 +1524,30 @@ class SyncHandler:
tags_by_room = await self.store.get_tags_for_user(user_id)
+ log_kv({"rooms_changed": len(room_changes.room_entries)})
+
room_entries = room_changes.room_entries
invited = room_changes.invited
knocked = room_changes.knocked
newly_joined_rooms = room_changes.newly_joined_rooms
newly_left_rooms = room_changes.newly_left_rooms
- async def handle_room_entries(room_entry):
- logger.debug("Generating room entry for %s", room_entry.room_id)
- res = await self._generate_room_entry(
- sync_result_builder,
- ignored_users,
- room_entry,
- ephemeral=ephemeral_by_room.get(room_entry.room_id, []),
- tags=tags_by_room.get(room_entry.room_id),
- account_data=account_data_by_room.get(room_entry.room_id, {}),
- always_include=sync_result_builder.full_state,
- )
- logger.debug("Generated room entry for %s", room_entry.room_id)
- return res
+ async def handle_room_entries(room_entry: "RoomSyncResultBuilder"):
+ with start_active_span("generate_room_entry"):
+ set_tag("room_id", room_entry.room_id)
+ log_kv({"events": len(room_entry.events or [])})
+ logger.debug("Generating room entry for %s", room_entry.room_id)
+ res = await self._generate_room_entry(
+ sync_result_builder,
+ ignored_users,
+ room_entry,
+ ephemeral=ephemeral_by_room.get(room_entry.room_id, []),
+ tags=tags_by_room.get(room_entry.room_id),
+ account_data=account_data_by_room.get(room_entry.room_id, {}),
+ always_include=sync_result_builder.full_state,
+ )
+ logger.debug("Generated room entry for %s", room_entry.room_id)
+ return res
await concurrently_execute(handle_room_entries, room_entries, 10)
@@ -1835,6 +1872,9 @@ class SyncHandler:
knocked = []
for event in room_list:
+ if event.room_version_id not in KNOWN_ROOM_VERSIONS:
+ continue
+
if event.membership == Membership.JOIN:
room_entries.append(
RoomSyncResultBuilder(
@@ -1920,6 +1960,12 @@ class SyncHandler:
room_id = room_builder.room_id
since_token = room_builder.since_token
upto_token = room_builder.upto_token
+ log_kv(
+ {
+ "since_token": since_token,
+ "upto_token": upto_token,
+ }
+ )
batch = await self._load_filtered_recents(
room_id,
@@ -1929,6 +1975,13 @@ class SyncHandler:
potential_recents=events,
newly_joined_room=newly_joined,
)
+ log_kv(
+ {
+ "batch_events": len(batch.events),
+ "prev_batch": batch.prev_batch,
+ "batch_limited": batch.limited,
+ }
+ )
# Note: `batch` can be both empty and limited here in the case where
# `_load_filtered_recents` can't find any events the user should see
@@ -2068,21 +2121,23 @@ class SyncHandler:
# If the membership's stream ordering is after the given stream
# ordering, we need to go and work out if the user was in the room
# before.
- for room_id, event_pos in joined_rooms:
- if not event_pos.persisted_after(room_key):
- joined_room_ids.add(room_id)
+ for joined_room in joined_rooms:
+ if not joined_room.event_pos.persisted_after(room_key):
+ joined_room_ids.add(joined_room.room_id)
continue
- logger.info("User joined room after current token: %s", room_id)
+ logger.info("User joined room after current token: %s", joined_room.room_id)
extrems = (
await self.store.get_forward_extremities_for_room_at_stream_ordering(
- room_id, event_pos.stream
+ joined_room.room_id, joined_room.event_pos.stream
)
)
- users_in_room = await self.state.get_current_users_in_room(room_id, extrems)
+ users_in_room = await self.state.get_current_users_in_room(
+ joined_room.room_id, extrems
+ )
if user_id in users_in_room:
- joined_room_ids.add(room_id)
+ joined_room_ids.add(joined_room.room_id)
return frozenset(joined_room_ids)
@@ -2152,7 +2207,7 @@ def _calculate_state(
return {event_id_to_key[e]: e for e in state_ids}
-@attr.s(slots=True)
+@attr.s(slots=True, auto_attribs=True)
class SyncResultBuilder:
"""Used to help build up a new SyncResult for a user
@@ -2164,33 +2219,33 @@ class SyncResultBuilder:
joined_room_ids: List of rooms the user is joined to
# The following mirror the fields in a sync response
- presence (list)
- account_data (list)
- joined (list[JoinedSyncResult])
- invited (list[InvitedSyncResult])
- knocked (list[KnockedSyncResult])
- archived (list[ArchivedSyncResult])
- groups (GroupsSyncResult|None)
- to_device (list)
+ presence
+ account_data
+ joined
+ invited
+ knocked
+ archived
+ groups
+ to_device
"""
- sync_config = attr.ib(type=SyncConfig)
- full_state = attr.ib(type=bool)
- since_token = attr.ib(type=Optional[StreamToken])
- now_token = attr.ib(type=StreamToken)
- joined_room_ids = attr.ib(type=FrozenSet[str])
+ sync_config: SyncConfig
+ full_state: bool
+ since_token: Optional[StreamToken]
+ now_token: StreamToken
+ joined_room_ids: FrozenSet[str]
- presence = attr.ib(type=List[JsonDict], default=attr.Factory(list))
- account_data = attr.ib(type=List[JsonDict], default=attr.Factory(list))
- joined = attr.ib(type=List[JoinedSyncResult], default=attr.Factory(list))
- invited = attr.ib(type=List[InvitedSyncResult], default=attr.Factory(list))
- knocked = attr.ib(type=List[KnockedSyncResult], default=attr.Factory(list))
- archived = attr.ib(type=List[ArchivedSyncResult], default=attr.Factory(list))
- groups = attr.ib(type=Optional[GroupsSyncResult], default=None)
- to_device = attr.ib(type=List[JsonDict], default=attr.Factory(list))
+ presence: List[UserPresenceState] = attr.Factory(list)
+ account_data: List[JsonDict] = attr.Factory(list)
+ joined: List[JoinedSyncResult] = attr.Factory(list)
+ invited: List[InvitedSyncResult] = attr.Factory(list)
+ knocked: List[KnockedSyncResult] = attr.Factory(list)
+ archived: List[ArchivedSyncResult] = attr.Factory(list)
+ groups: Optional[GroupsSyncResult] = None
+ to_device: List[JsonDict] = attr.Factory(list)
-@attr.s(slots=True)
+@attr.s(slots=True, auto_attribs=True)
class RoomSyncResultBuilder:
"""Stores information needed to create either a `JoinedSyncResult` or
`ArchivedSyncResult`.
@@ -2206,10 +2261,10 @@ class RoomSyncResultBuilder:
upto_token: Latest point to return events from.
"""
- room_id = attr.ib(type=str)
- rtype = attr.ib(type=str)
- events = attr.ib(type=Optional[List[EventBase]])
- newly_joined = attr.ib(type=bool)
- full_state = attr.ib(type=bool)
- since_token = attr.ib(type=Optional[StreamToken])
- upto_token = attr.ib(type=StreamToken)
+ room_id: str
+ rtype: str
+ events: Optional[List[EventBase]]
+ newly_joined: bool
+ full_state: bool
+ since_token: Optional[StreamToken]
+ upto_token: StreamToken
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 0cb651a400..a97c448595 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -335,7 +335,8 @@ class TypingWriterHandler(FollowerTypingHandler):
)
if not is_in_room:
logger.info(
- "Ignoring typing update from %s as we're not in the room",
+ "Ignoring typing update for room %r from server %s as we're not in the room",
+ room_id,
origin,
)
return
diff --git a/synapse/handlers/ui_auth/__init__.py b/synapse/handlers/ui_auth/__init__.py
index 4c3b669fae..13b0c61d2e 100644
--- a/synapse/handlers/ui_auth/__init__.py
+++ b/synapse/handlers/ui_auth/__init__.py
@@ -34,3 +34,8 @@ class UIAuthSessionDataConstants:
# used by validate_user_via_ui_auth to store the mxid of the user we are validating
# for.
REQUEST_USER_ID = "request_user_id"
+
+ # used during registration to store the registration token used (if required) so that:
+ # - we can prevent a token being used twice by one session
+ # - we can 'use up' the token after registration has successfully completed
+ REGISTRATION_TOKEN = "org.matrix.msc3231.login.registration_token"
diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py
index 5414ce77d8..d3828dec6b 100644
--- a/synapse/handlers/ui_auth/checkers.py
+++ b/synapse/handlers/ui_auth/checkers.py
@@ -49,7 +49,7 @@ class UserInteractiveAuthChecker:
clientip: The IP address of the client.
Raises:
- SynapseError if authentication failed
+ LoginError if authentication failed.
Returns:
The result of authentication (to pass back to the client?)
@@ -131,7 +131,9 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):
)
if resp_body["success"]:
return True
- raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
+ raise LoginError(
+ 401, "Captcha authentication failed", errcode=Codes.UNAUTHORIZED
+ )
class _BaseThreepidAuthChecker:
@@ -191,7 +193,9 @@ class _BaseThreepidAuthChecker:
raise AssertionError("Unrecognized threepid medium: %s" % (medium,))
if not threepid:
- raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
+ raise LoginError(
+ 401, "Unable to get validated threepid", errcode=Codes.UNAUTHORIZED
+ )
if threepid["medium"] != medium:
raise LoginError(
@@ -237,11 +241,76 @@ class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker):
return await self._check_threepid("msisdn", authdict)
+class RegistrationTokenAuthChecker(UserInteractiveAuthChecker):
+ AUTH_TYPE = LoginType.REGISTRATION_TOKEN
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+ self.hs = hs
+ self._enabled = bool(hs.config.registration_requires_token)
+ self.store = hs.get_datastore()
+
+ def is_enabled(self) -> bool:
+ return self._enabled
+
+ async def check_auth(self, authdict: dict, clientip: str) -> Any:
+ if "token" not in authdict:
+ raise LoginError(400, "Missing registration token", Codes.MISSING_PARAM)
+ if not isinstance(authdict["token"], str):
+ raise LoginError(
+ 400, "Registration token must be a string", Codes.INVALID_PARAM
+ )
+ if "session" not in authdict:
+ raise LoginError(400, "Missing UIA session", Codes.MISSING_PARAM)
+
+ # Get these here to avoid cyclic dependencies
+ from synapse.handlers.ui_auth import UIAuthSessionDataConstants
+
+ auth_handler = self.hs.get_auth_handler()
+
+ session = authdict["session"]
+ token = authdict["token"]
+
+ # If the LoginType.REGISTRATION_TOKEN stage has already been completed,
+ # return early to avoid incrementing `pending` again.
+ stored_token = await auth_handler.get_session_data(
+ session, UIAuthSessionDataConstants.REGISTRATION_TOKEN
+ )
+ if stored_token:
+ if token != stored_token:
+ raise LoginError(
+ 400, "Registration token has changed", Codes.INVALID_PARAM
+ )
+ else:
+ return token
+
+ if await self.store.registration_token_is_valid(token):
+ # Increment pending counter, so that if token has limited uses it
+ # can't be used up by someone else in the meantime.
+ await self.store.set_registration_token_pending(token)
+ # Store the token in the UIA session, so that once registration
+ # is complete `completed` can be incremented.
+ await auth_handler.set_session_data(
+ session,
+ UIAuthSessionDataConstants.REGISTRATION_TOKEN,
+ token,
+ )
+ # The token will be stored as the result of the authentication stage
+ # in ui_auth_sessions_credentials. This allows the pending counter
+ # for tokens to be decremented when expired sessions are deleted.
+ return token
+ else:
+ raise LoginError(
+ 401, "Invalid registration token", errcode=Codes.UNAUTHORIZED
+ )
+
+
INTERACTIVE_AUTH_CHECKERS = [
DummyAuthChecker,
TermsAuthChecker,
RecaptchaAuthChecker,
EmailIdentityAuthChecker,
MsisdnAuthChecker,
+ RegistrationTokenAuthChecker,
]
"""A list of UserInteractiveAuthChecker classes"""
diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py
index 55ea97a07f..9a2684aca4 100644
--- a/synapse/http/additional_resource.py
+++ b/synapse/http/additional_resource.py
@@ -12,8 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import TYPE_CHECKING
+
+from twisted.web.server import Request
+
from synapse.http.server import DirectServeJsonResource
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
class AdditionalResource(DirectServeJsonResource):
"""Resource wrapper for additional_resources
@@ -25,7 +32,7 @@ class AdditionalResource(DirectServeJsonResource):
and exception handling.
"""
- def __init__(self, hs, handler):
+ def __init__(self, hs: "HomeServer", handler):
"""Initialise AdditionalResource
The ``handler`` should return a deferred which completes when it has
@@ -33,14 +40,14 @@ class AdditionalResource(DirectServeJsonResource):
``request.write()``, and call ``request.finish()``.
Args:
- hs (synapse.server.HomeServer): homeserver
+ hs: homeserver
handler ((twisted.web.server.Request) -> twisted.internet.defer.Deferred):
function to be called to handle the request.
"""
super().__init__()
self._handler = handler
- def _async_render(self, request):
+ def _async_render(self, request: Request):
# Cheekily pass the result straight through, so we don't need to worry
# if its an awaitable or not.
return self._handler(request)
diff --git a/synapse/http/connectproxyclient.py b/synapse/http/connectproxyclient.py
index 17e1c5abb1..c577142268 100644
--- a/synapse/http/connectproxyclient.py
+++ b/synapse/http/connectproxyclient.py
@@ -12,8 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import base64
import logging
+from typing import Optional
+import attr
from zope.interface import implementer
from twisted.internet import defer, protocol
@@ -21,7 +24,6 @@ from twisted.internet.error import ConnectError
from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint
from twisted.internet.protocol import ClientFactory, Protocol, connectionDone
from twisted.web import http
-from twisted.web.http_headers import Headers
logger = logging.getLogger(__name__)
@@ -30,6 +32,22 @@ class ProxyConnectError(ConnectError):
pass
+@attr.s
+class ProxyCredentials:
+ username_password = attr.ib(type=bytes)
+
+ def as_proxy_authorization_value(self) -> bytes:
+ """
+ Return the value for a Proxy-Authorization header (i.e. 'Basic abdef==').
+
+ Returns:
+ A transformation of the authentication string: the encoded value for
+ a Proxy-Authorization header.
+ """
+ # Encode as base64 and prepend the authorization type
+ return b"Basic " + base64.encodebytes(self.username_password)
+
+
@implementer(IStreamClientEndpoint)
class HTTPConnectProxyEndpoint:
"""An Endpoint implementation which will send a CONNECT request to an http proxy
@@ -46,7 +64,7 @@ class HTTPConnectProxyEndpoint:
proxy_endpoint: the endpoint to use to connect to the proxy
host: hostname that we want to CONNECT to
port: port that we want to connect to
- headers: Extra HTTP headers to include in the CONNECT request
+ proxy_creds: credentials to authenticate at proxy
"""
def __init__(
@@ -55,20 +73,20 @@ class HTTPConnectProxyEndpoint:
proxy_endpoint: IStreamClientEndpoint,
host: bytes,
port: int,
- headers: Headers,
+ proxy_creds: Optional[ProxyCredentials],
):
self._reactor = reactor
self._proxy_endpoint = proxy_endpoint
self._host = host
self._port = port
- self._headers = headers
+ self._proxy_creds = proxy_creds
def __repr__(self):
return "<HTTPConnectProxyEndpoint %s>" % (self._proxy_endpoint,)
def connect(self, protocolFactory: ClientFactory):
f = HTTPProxiedClientFactory(
- self._host, self._port, protocolFactory, self._headers
+ self._host, self._port, protocolFactory, self._proxy_creds
)
d = self._proxy_endpoint.connect(f)
# once the tcp socket connects successfully, we need to wait for the
@@ -87,7 +105,7 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
dst_host: hostname that we want to CONNECT to
dst_port: port that we want to connect to
wrapped_factory: The original Factory
- headers: Extra HTTP headers to include in the CONNECT request
+ proxy_creds: credentials to authenticate at proxy
"""
def __init__(
@@ -95,12 +113,12 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
dst_host: bytes,
dst_port: int,
wrapped_factory: ClientFactory,
- headers: Headers,
+ proxy_creds: Optional[ProxyCredentials],
):
self.dst_host = dst_host
self.dst_port = dst_port
self.wrapped_factory = wrapped_factory
- self.headers = headers
+ self.proxy_creds = proxy_creds
self.on_connection = defer.Deferred()
def startedConnecting(self, connector):
@@ -114,7 +132,7 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
self.dst_port,
wrapped_protocol,
self.on_connection,
- self.headers,
+ self.proxy_creds,
)
def clientConnectionFailed(self, connector, reason):
@@ -145,7 +163,7 @@ class HTTPConnectProtocol(protocol.Protocol):
connected_deferred: a Deferred which will be callbacked with
wrapped_protocol when the CONNECT completes
- headers: Extra HTTP headers to include in the CONNECT request
+ proxy_creds: credentials to authenticate at proxy
"""
def __init__(
@@ -154,16 +172,16 @@ class HTTPConnectProtocol(protocol.Protocol):
port: int,
wrapped_protocol: Protocol,
connected_deferred: defer.Deferred,
- headers: Headers,
+ proxy_creds: Optional[ProxyCredentials],
):
self.host = host
self.port = port
self.wrapped_protocol = wrapped_protocol
self.connected_deferred = connected_deferred
- self.headers = headers
+ self.proxy_creds = proxy_creds
self.http_setup_client = HTTPConnectSetupClient(
- self.host, self.port, self.headers
+ self.host, self.port, self.proxy_creds
)
self.http_setup_client.on_connected.addCallback(self.proxyConnected)
@@ -205,30 +223,38 @@ class HTTPConnectSetupClient(http.HTTPClient):
Args:
host: The hostname to send in the CONNECT message
port: The port to send in the CONNECT message
- headers: Extra headers to send with the CONNECT message
+ proxy_creds: credentials to authenticate at proxy
"""
- def __init__(self, host: bytes, port: int, headers: Headers):
+ def __init__(
+ self,
+ host: bytes,
+ port: int,
+ proxy_creds: Optional[ProxyCredentials],
+ ):
self.host = host
self.port = port
- self.headers = headers
+ self.proxy_creds = proxy_creds
self.on_connected = defer.Deferred()
def connectionMade(self):
logger.debug("Connected to proxy, sending CONNECT")
self.sendCommand(b"CONNECT", b"%s:%d" % (self.host, self.port))
- # Send any additional specified headers
- for name, values in self.headers.getAllRawHeaders():
- for value in values:
- self.sendHeader(name, value)
+ # Determine whether we need to set Proxy-Authorization headers
+ if self.proxy_creds:
+ # Set a Proxy-Authorization header
+ self.sendHeader(
+ b"Proxy-Authorization",
+ self.proxy_creds.as_proxy_authorization_value(),
+ )
self.endHeaders()
def handleStatus(self, version: bytes, status: bytes, message: bytes):
logger.debug("Got Status: %s %s %s", status, message, version)
if status != b"200":
- raise ProxyConnectError("Unexpected status on CONNECT: %s" % status)
+ raise ProxyConnectError(f"Unexpected status on CONNECT: {status!s}")
def handleEndHeaders(self):
logger.debug("End Headers")
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index c16b7f10e6..1238bfd287 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -14,6 +14,10 @@
import logging
import urllib.parse
from typing import Any, Generator, List, Optional
+from urllib.request import ( # type: ignore[attr-defined]
+ getproxies_environment,
+ proxy_bypass_environment,
+)
from netaddr import AddrFormatError, IPAddress, IPSet
from zope.interface import implementer
@@ -30,9 +34,12 @@ from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IAgentEndpointFactory, IBodyProducer, IResponse
from synapse.crypto.context_factory import FederationPolicyForHTTPS
-from synapse.http.client import BlacklistingAgentWrapper
+from synapse.http import proxyagent
+from synapse.http.client import BlacklistingAgentWrapper, BlacklistingReactorWrapper
+from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
from synapse.http.federation.srv_resolver import Server, SrvResolver
from synapse.http.federation.well_known_resolver import WellKnownResolver
+from synapse.http.proxyagent import ProxyAgent
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.types import ISynapseReactor
from synapse.util import Clock
@@ -57,6 +64,14 @@ class MatrixFederationAgent:
user_agent:
The user agent header to use for federation requests.
+ ip_whitelist: Allowed IP addresses.
+
+ ip_blacklist: Disallowed IP addresses.
+
+ proxy_reactor: twisted reactor to use for connections to the proxy server.
+ 'reactor' might have some blacklisting applied (i.e. for DNS queries),
+ but we need unblocked access to the proxy.
+
_srv_resolver:
SrvResolver implementation to use for looking up SRV records. None
to use a default implementation.
@@ -71,11 +86,18 @@ class MatrixFederationAgent:
reactor: ISynapseReactor,
tls_client_options_factory: Optional[FederationPolicyForHTTPS],
user_agent: bytes,
+ ip_whitelist: IPSet,
ip_blacklist: IPSet,
_srv_resolver: Optional[SrvResolver] = None,
_well_known_resolver: Optional[WellKnownResolver] = None,
):
- self._reactor = reactor
+ # proxy_reactor is not blacklisted
+ proxy_reactor = reactor
+
+ # We need to use a DNS resolver which filters out blacklisted IP
+ # addresses, to prevent DNS rebinding.
+ reactor = BlacklistingReactorWrapper(reactor, ip_whitelist, ip_blacklist)
+
self._clock = Clock(reactor)
self._pool = HTTPConnectionPool(reactor)
self._pool.retryAutomatically = False
@@ -83,24 +105,27 @@ class MatrixFederationAgent:
self._pool.cachedConnectionTimeout = 2 * 60
self._agent = Agent.usingEndpointFactory(
- self._reactor,
+ reactor,
MatrixHostnameEndpointFactory(
- reactor, tls_client_options_factory, _srv_resolver
+ reactor,
+ proxy_reactor,
+ tls_client_options_factory,
+ _srv_resolver,
),
pool=self._pool,
)
self.user_agent = user_agent
if _well_known_resolver is None:
- # Note that the name resolver has already been wrapped in a
- # IPBlacklistingResolver by MatrixFederationHttpClient.
_well_known_resolver = WellKnownResolver(
- self._reactor,
+ reactor,
agent=BlacklistingAgentWrapper(
- Agent(
- self._reactor,
+ ProxyAgent(
+ reactor,
+ proxy_reactor,
pool=self._pool,
contextFactory=tls_client_options_factory,
+ use_proxy=True,
),
ip_blacklist=ip_blacklist,
),
@@ -200,10 +225,12 @@ class MatrixHostnameEndpointFactory:
def __init__(
self,
reactor: IReactorCore,
+ proxy_reactor: IReactorCore,
tls_client_options_factory: Optional[FederationPolicyForHTTPS],
srv_resolver: Optional[SrvResolver],
):
self._reactor = reactor
+ self._proxy_reactor = proxy_reactor
self._tls_client_options_factory = tls_client_options_factory
if srv_resolver is None:
@@ -211,9 +238,10 @@ class MatrixHostnameEndpointFactory:
self._srv_resolver = srv_resolver
- def endpointForURI(self, parsed_uri):
+ def endpointForURI(self, parsed_uri: URI):
return MatrixHostnameEndpoint(
self._reactor,
+ self._proxy_reactor,
self._tls_client_options_factory,
self._srv_resolver,
parsed_uri,
@@ -227,23 +255,45 @@ class MatrixHostnameEndpoint:
Args:
reactor: twisted reactor to use for underlying requests
+ proxy_reactor: twisted reactor to use for connections to the proxy server.
+ 'reactor' might have some blacklisting applied (i.e. for DNS queries),
+ but we need unblocked access to the proxy.
tls_client_options_factory:
factory to use for fetching client tls options, or none to disable TLS.
srv_resolver: The SRV resolver to use
parsed_uri: The parsed URI that we're wanting to connect to.
+
+ Raises:
+ ValueError if the environment variables contain an invalid proxy specification.
+ RuntimeError if no tls_options_factory is given for an https connection
"""
def __init__(
self,
reactor: IReactorCore,
+ proxy_reactor: IReactorCore,
tls_client_options_factory: Optional[FederationPolicyForHTTPS],
srv_resolver: SrvResolver,
parsed_uri: URI,
):
self._reactor = reactor
-
self._parsed_uri = parsed_uri
+ # http_proxy is not needed because federation is always over TLS
+ proxies = getproxies_environment()
+ https_proxy = proxies["https"].encode() if "https" in proxies else None
+ self.no_proxy = proxies["no"] if "no" in proxies else None
+
+ # endpoint and credentials to use to connect to the outbound https proxy, if any.
+ (
+ self._https_proxy_endpoint,
+ self._https_proxy_creds,
+ ) = proxyagent.http_proxy_endpoint(
+ https_proxy,
+ proxy_reactor,
+ tls_client_options_factory,
+ )
+
# set up the TLS connection params
#
# XXX disabling TLS is really only supported here for the benefit of the
@@ -273,9 +323,33 @@ class MatrixHostnameEndpoint:
host = server.host
port = server.port
+ should_skip_proxy = False
+ if self.no_proxy is not None:
+ should_skip_proxy = proxy_bypass_environment(
+ host.decode(),
+ proxies={"no": self.no_proxy},
+ )
+
+ endpoint: IStreamClientEndpoint
try:
- logger.debug("Connecting to %s:%i", host.decode("ascii"), port)
- endpoint = HostnameEndpoint(self._reactor, host, port)
+ if self._https_proxy_endpoint and not should_skip_proxy:
+ logger.debug(
+ "Connecting to %s:%i via %s",
+ host.decode("ascii"),
+ port,
+ self._https_proxy_endpoint,
+ )
+ endpoint = HTTPConnectProxyEndpoint(
+ self._reactor,
+ self._https_proxy_endpoint,
+ host,
+ port,
+ proxy_creds=self._https_proxy_creds,
+ )
+ else:
+ logger.debug("Connecting to %s:%i", host.decode("ascii"), port)
+ # not using a proxy
+ endpoint = HostnameEndpoint(self._reactor, host, port)
if self._tls_options:
endpoint = wrapClientTLS(self._tls_options, endpoint)
result = await make_deferred_yieldable(
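The proxy discovery above leans on two stdlib helpers. A small sketch of their behaviour (the environment values are illustrative):

    import os
    from urllib.request import getproxies_environment, proxy_bypass_environment

    os.environ["https_proxy"] = "http://user:pass@proxy.example:8888"
    os.environ["no_proxy"] = "matrix.org,.internal"

    proxies = getproxies_environment()
    print(proxies["https"])  # http://user:pass@proxy.example:8888
    # Hosts listed in no_proxy (or under a listed suffix) bypass the proxy:
    print(bool(proxy_bypass_environment("matrix.org", proxies={"no": proxies["no"]})))   # True
    print(bool(proxy_bypass_environment("example.com", proxies={"no": proxies["no"]})))  # False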
diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py
index b8ed4ec905..f68646fd0d 100644
--- a/synapse/http/federation/srv_resolver.py
+++ b/synapse/http/federation/srv_resolver.py
@@ -16,7 +16,7 @@
import logging
import random
import time
-from typing import List
+from typing import Callable, Dict, List
import attr
@@ -28,35 +28,35 @@ from synapse.logging.context import make_deferred_yieldable
logger = logging.getLogger(__name__)
-SERVER_CACHE = {}
+SERVER_CACHE: Dict[bytes, List["Server"]] = {}
-@attr.s(slots=True, frozen=True)
+@attr.s(auto_attribs=True, slots=True, frozen=True)
class Server:
"""
Our record of an individual server which can be tried to reach a destination.
Attributes:
- host (bytes): target hostname
- port (int):
- priority (int):
- weight (int):
- expires (int): when the cache should expire this record - in *seconds* since
+ host: target hostname
+ port:
+ priority:
+ weight:
+ expires: when the cache should expire this record - in *seconds* since
the epoch
"""
- host = attr.ib()
- port = attr.ib()
- priority = attr.ib(default=0)
- weight = attr.ib(default=0)
- expires = attr.ib(default=0)
+ host: bytes
+ port: int
+ priority: int = 0
+ weight: int = 0
+ expires: int = 0
-def _sort_server_list(server_list):
+def _sort_server_list(server_list: List[Server]) -> List[Server]:
"""Given a list of SRV records sort them into priority order and shuffle
each priority with the given weight.
"""
- priority_map = {}
+ priority_map: Dict[int, List[Server]] = {}
for server in server_list:
priority_map.setdefault(server.priority, []).append(server)
@@ -103,11 +103,16 @@ class SrvResolver:
Args:
dns_client (twisted.internet.interfaces.IResolver): twisted resolver impl
- cache (dict): cache object
- get_time (callable): clock implementation. Should return seconds since the epoch
+ cache: cache object
+ get_time: clock implementation. Should return seconds since the epoch
"""
- def __init__(self, dns_client=client, cache=SERVER_CACHE, get_time=time.time):
+ def __init__(
+ self,
+ dns_client=client,
+ cache: Dict[bytes, List[Server]] = SERVER_CACHE,
+ get_time: Callable[[], float] = time.time,
+ ):
self._dns_client = dns_client
self._cache = cache
self._get_time = get_time
@@ -116,7 +121,7 @@ class SrvResolver:
"""Look up a SRV record
Args:
- service_name (bytes): record to look up
+ service_name: record to look up
Returns:
a list of the SRV records, or an empty list if none found
@@ -158,7 +163,7 @@ class SrvResolver:
and answers[0].payload
and answers[0].payload.target == dns.Name(b".")
):
- raise ConnectError("Service %s unavailable" % service_name)
+ raise ConnectError(f"Service {service_name!r} unavailable")
servers = []
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 2efa15bf04..2e9898997c 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -59,7 +59,6 @@ from synapse.api.errors import (
from synapse.http import QuieterFileBodyProducer
from synapse.http.client import (
BlacklistingAgentWrapper,
- BlacklistingReactorWrapper,
BodyExceededMaxSize,
ByteWriteable,
encode_query_args,
@@ -69,7 +68,7 @@ from synapse.http.federation.matrix_federation_agent import MatrixFederationAgen
from synapse.logging import opentracing
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag, start_active_span, tags
-from synapse.types import ISynapseReactor, JsonDict
+from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.async_helpers import timeout_deferred
from synapse.util.metrics import Measure
@@ -325,13 +324,7 @@ class MatrixFederationHttpClient:
self.signing_key = hs.signing_key
self.server_name = hs.hostname
- # We need to use a DNS resolver which filters out blacklisted IP
- # addresses, to prevent DNS rebinding.
- self.reactor: ISynapseReactor = BlacklistingReactorWrapper(
- hs.get_reactor(),
- hs.config.federation_ip_range_whitelist,
- hs.config.federation_ip_range_blacklist,
- )
+ self.reactor = hs.get_reactor()
user_agent = hs.version_string
if hs.config.user_agent_suffix:
@@ -342,6 +335,7 @@ class MatrixFederationHttpClient:
self.reactor,
tls_client_options_factory,
user_agent,
+ hs.config.federation_ip_range_whitelist,
hs.config.federation_ip_range_blacklist,
)
diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
index 19e987f118..6fd88bde20 100644
--- a/synapse/http/proxyagent.py
+++ b/synapse/http/proxyagent.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import base64
import logging
import re
from typing import Any, Dict, Optional, Tuple
@@ -21,7 +20,6 @@ from urllib.request import ( # type: ignore[attr-defined]
proxy_bypass_environment,
)
-import attr
from zope.interface import implementer
from twisted.internet import defer
@@ -38,7 +36,7 @@ from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS
-from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
+from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
from synapse.types import ISynapseReactor
logger = logging.getLogger(__name__)
@@ -46,22 +44,6 @@ logger = logging.getLogger(__name__)
_VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")
-@attr.s
-class ProxyCredentials:
- username_password = attr.ib(type=bytes)
-
- def as_proxy_authorization_value(self) -> bytes:
- """
- Return the value for a Proxy-Authorization header (i.e. 'Basic abdef==').
-
- Returns:
- A transformation of the authentication string the encoded value for
- a Proxy-Authorization header.
- """
- # Encode as base64 and prepend the authorization type
- return b"Basic " + base64.encodebytes(self.username_password)
-
-
@implementer(IAgent)
class ProxyAgent(_AgentBase):
"""An Agent implementation which will use an HTTP proxy if one was requested
@@ -95,6 +77,7 @@ class ProxyAgent(_AgentBase):
Raises:
ValueError if use_proxy is set and the environment variables
contain an invalid proxy specification.
+ RuntimeError if no tls_options_factory is given for an https connection
"""
def __init__(
@@ -131,11 +114,11 @@ class ProxyAgent(_AgentBase):
https_proxy = proxies["https"].encode() if "https" in proxies else None
no_proxy = proxies["no"] if "no" in proxies else None
- self.http_proxy_endpoint, self.http_proxy_creds = _http_proxy_endpoint(
+ self.http_proxy_endpoint, self.http_proxy_creds = http_proxy_endpoint(
http_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs
)
- self.https_proxy_endpoint, self.https_proxy_creds = _http_proxy_endpoint(
+ self.https_proxy_endpoint, self.https_proxy_creds = http_proxy_endpoint(
https_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs
)
@@ -190,7 +173,7 @@ class ProxyAgent(_AgentBase):
raise ValueError(f"Invalid URI {uri!r}")
parsed_uri = URI.fromBytes(uri)
- pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
+ pool_key = f"{parsed_uri.scheme!r}{parsed_uri.host!r}{parsed_uri.port}"
request_path = parsed_uri.originForm
should_skip_proxy = False
@@ -216,7 +199,7 @@ class ProxyAgent(_AgentBase):
)
# Cache *all* connections under the same key, since we are only
# connecting to a single destination, the proxy:
- pool_key = ("http-proxy", self.http_proxy_endpoint)
+ pool_key = "http-proxy"
endpoint = self.http_proxy_endpoint
request_path = uri
elif (
@@ -224,22 +207,12 @@ class ProxyAgent(_AgentBase):
and self.https_proxy_endpoint
and not should_skip_proxy
):
- connect_headers = Headers()
-
- # Determine whether we need to set Proxy-Authorization headers
- if self.https_proxy_creds:
- # Set a Proxy-Authorization header
- connect_headers.addRawHeader(
- b"Proxy-Authorization",
- self.https_proxy_creds.as_proxy_authorization_value(),
- )
-
endpoint = HTTPConnectProxyEndpoint(
self.proxy_reactor,
self.https_proxy_endpoint,
parsed_uri.host,
parsed_uri.port,
- headers=connect_headers,
+ self.https_proxy_creds,
)
else:
# not using a proxy
@@ -268,10 +241,10 @@ class ProxyAgent(_AgentBase):
)
-def _http_proxy_endpoint(
+def http_proxy_endpoint(
proxy: Optional[bytes],
reactor: IReactorCore,
- tls_options_factory: IPolicyForHTTPS,
+ tls_options_factory: Optional[IPolicyForHTTPS],
**kwargs,
) -> Tuple[Optional[IStreamClientEndpoint], Optional[ProxyCredentials]]:
"""Parses an http proxy setting and returns an endpoint for the proxy
@@ -294,6 +267,7 @@ def _http_proxy_endpoint(
Raise:
ValueError if proxy has no hostname or unsupported scheme.
+ RuntimeError if no tls_options_factory is given for an https connection
"""
if proxy is None:
return None, None
@@ -305,8 +279,13 @@ def _http_proxy_endpoint(
proxy_endpoint = HostnameEndpoint(reactor, host, port, **kwargs)
if scheme == b"https":
- tls_options = tls_options_factory.creatorForNetloc(host, port)
- proxy_endpoint = wrapClientTLS(tls_options, proxy_endpoint)
+ if tls_options_factory:
+ tls_options = tls_options_factory.creatorForNetloc(host, port)
+ proxy_endpoint = wrapClientTLS(tls_options, proxy_endpoint)
+ else:
+ raise RuntimeError(
+ f"No TLS options for a https connection via proxy {proxy!s}"
+ )
return proxy_endpoint, credentials
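With `_http_proxy_endpoint` made public as `http_proxy_endpoint`, the federation agent can parse the same proxy specifications. A hedged usage sketch (assuming a Synapse checkout; the proxy URL is a placeholder):

    from twisted.internet import reactor

    from synapse.http.proxyagent import http_proxy_endpoint

    # A plain http:// proxy needs no TLS options factory, so None is fine here.
    endpoint, creds = http_proxy_endpoint(
        b"http://user:pass@proxy.example:8888", reactor, None
    )
    # `endpoint` is a HostnameEndpoint for proxy.example:8888, and
    # creds.as_proxy_authorization_value() yields the b"Basic ..." header value.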
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 732a1e6aeb..91ba93372c 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -14,16 +14,28 @@
""" This module contains base REST classes for constructing REST servlets. """
import logging
-from typing import Iterable, List, Mapping, Optional, Sequence, overload
+from typing import (
+ TYPE_CHECKING,
+ Iterable,
+ List,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ overload,
+)
from typing_extensions import Literal
from twisted.web.server import Request
from synapse.api.errors import Codes, SynapseError
-from synapse.types import JsonDict
+from synapse.types import JsonDict, RoomAlias, RoomID
from synapse.util import json_decoder
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -560,6 +572,25 @@ def parse_string_from_args(
return strings[0]
+@overload
+def parse_json_value_from_request(request: Request) -> JsonDict:
+ ...
+
+
+@overload
+def parse_json_value_from_request(
+ request: Request, allow_empty_body: Literal[False]
+) -> JsonDict:
+ ...
+
+
+@overload
+def parse_json_value_from_request(
+ request: Request, allow_empty_body: bool = False
+) -> Optional[JsonDict]:
+ ...
+
+
def parse_json_value_from_request(
request: Request, allow_empty_body: bool = False
) -> Optional[JsonDict]:
@@ -663,3 +694,45 @@ class RestServlet:
else:
raise NotImplementedError("RestServlet must register something.")
+
+
+class ResolveRoomIdMixin:
+ def __init__(self, hs: "HomeServer"):
+ self.room_member_handler = hs.get_room_member_handler()
+
+ async def resolve_room_id(
+ self, room_identifier: str, remote_room_hosts: Optional[List[str]] = None
+ ) -> Tuple[str, Optional[List[str]]]:
+ """
+ Resolve a room identifier to a room ID, if necessary.
+
+ This also performs checks to ensure the room ID is of the proper form.
+
+ Args:
+ room_identifier: The room ID or alias.
+ remote_room_hosts: The potential remote room hosts to use.
+
+ Returns:
+ A tuple of the resolved room ID and a list of remote room hosts, if applicable.
+
+ Raises:
+ SynapseError if the room ID is of the wrong form.
+ """
+ if RoomID.is_valid(room_identifier):
+ resolved_room_id = room_identifier
+ elif RoomAlias.is_valid(room_identifier):
+ room_alias = RoomAlias.from_string(room_identifier)
+ (
+ room_id,
+ remote_room_hosts,
+ ) = await self.room_member_handler.lookup_room_alias(room_alias)
+ resolved_room_id = room_id.to_string()
+ else:
+ raise SynapseError(
+ 400, "%s was not legal room ID or room alias" % (room_identifier,)
+ )
+ if not resolved_room_id:
+ raise SynapseError(
+ 400, "Unknown room ID or room alias %s" % room_identifier
+ )
+ return resolved_room_id, remote_room_hosts
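A hedged sketch of how a servlet might compose the new mixin (the servlet name and route handling are illustrative, and the sketch assumes the names already imported in this module):

    class ExampleRoomServlet(ResolveRoomIdMixin, RestServlet):
        # Illustrative only: a real servlet would also define PATTERNS etc.
        def __init__(self, hs: "HomeServer"):
            super().__init__(hs)

        async def on_GET(self, request: Request, room_identifier: str):
            # Accepts either "!abc:server" or "#alias:server"; aliases are
            # resolved via the room member handler.
            room_id, remote_hosts = await self.resolve_room_id(room_identifier)
            return 200, {"room_id": room_id}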
diff --git a/synapse/logging/handlers.py b/synapse/logging/handlers.py
index a6c212f300..af5fc407a8 100644
--- a/synapse/logging/handlers.py
+++ b/synapse/logging/handlers.py
@@ -45,6 +45,7 @@ class PeriodicallyFlushingMemoryHandler(MemoryHandler):
self._flushing_thread: Thread = Thread(
name="PeriodicallyFlushingMemoryHandler flushing thread",
target=self._flush_periodically,
+ daemon=True,
)
self._flushing_thread.start()
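The `daemon=True` flag matters because a non-daemon thread keeps the interpreter alive at shutdown, which a background flushing loop must not do. In miniature:

    import threading
    import time

    t = threading.Thread(target=lambda: time.sleep(3600), daemon=True)
    t.start()
    # With daemon=True the process may exit immediately; without it,
    # interpreter shutdown would block for the remainder of the sleep.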
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 473812b8e2..b11fa6393b 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -32,6 +32,7 @@ from twisted.internet import defer
from twisted.web.resource import IResource
from synapse.events import EventBase
+from synapse.events.presence_router import PresenceRouter
from synapse.http.client import SimpleHttpClient
from synapse.http.server import (
DirectServeHtmlResource,
@@ -45,7 +46,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.roommember import ProfileInfo
from synapse.storage.state import StateFilter
-from synapse.types import JsonDict, Requester, UserID, create_requester
+from synapse.types import JsonDict, Requester, UserID, UserInfo, create_requester
from synapse.util import Clock
from synapse.util.caches.descriptors import cached
@@ -57,6 +58,8 @@ This package defines the 'stable' API which can be used by extension modules whi
are loaded into Synapse.
"""
+PRESENCE_ALL_USERS = PresenceRouter.ALL_USERS
+
__all__ = [
"errors",
"make_deferred_yieldable",
@@ -70,6 +73,7 @@ __all__ = [
"DirectServeHtmlResource",
"DirectServeJsonResource",
"ModuleApi",
+ "PRESENCE_ALL_USERS",
]
logger = logging.getLogger(__name__)
@@ -91,6 +95,7 @@ class ModuleApi:
self._state = hs.get_state_handler()
self._clock: Clock = hs.get_clock()
self._send_email_handler = hs.get_send_email_handler()
+ self.custom_template_dir = hs.config.server.custom_template_directory
try:
app_name = self._hs.config.email_app_name
@@ -111,6 +116,7 @@ class ModuleApi:
self._spam_checker = hs.get_spam_checker()
self._account_validity_handler = hs.get_account_validity_handler()
self._third_party_event_rules = hs.get_third_party_event_rules()
+ self._presence_router = hs.get_presence_router()
#################################################################################
# The following methods should only be called during the module's initialisation.
@@ -130,6 +136,11 @@ class ModuleApi:
"""Registers callbacks for third party event rules capabilities."""
return self._third_party_event_rules.register_third_party_rules_callbacks
+ @property
+ def register_presence_router_callbacks(self):
+ """Registers callbacks for presence router capabilities."""
+ return self._presence_router.register_presence_router_callbacks
+
def register_web_resource(self, path: str, resource: IResource):
"""Registers a web resource to be served at the given path.
@@ -174,6 +185,16 @@ class ModuleApi:
"""The application name configured in the homeserver's configuration."""
return self._hs.config.email.email_app_name
+ async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]:
+ """Get user info by user_id
+
+ Args:
+ user_id: Fully qualified user id.
+ Returns:
+ UserInfo object if a user was found, otherwise None
+ """
+ return await self._store.get_userinfo_by_id(user_id)
+
async def get_user_by_req(
self,
req: SynapseRequest,
@@ -593,10 +614,15 @@ class ModuleApi:
msec: float,
*args,
desc: Optional[str] = None,
+ run_on_all_instances: bool = False,
**kwargs,
):
"""Wraps a function as a background process and calls it repeatedly.
+ NOTE: Will only run on the instance that is configured to run
+ background processes (which is the main process by default), unless
+ `run_on_all_instances` is set.
+
Waits `msec` initially before calling `f` for the first time.
Args:
@@ -607,12 +633,14 @@ class ModuleApi:
msec: How long to wait between calls in milliseconds.
*args: Positional arguments to pass to function.
desc: The background task's description. Default to the function's name.
+ run_on_all_instances: Whether to run this on all instances, rather
+ than just the instance configured to run background tasks.
**kwargs: Key arguments to pass to function.
"""
if desc is None:
desc = f.__name__
- if self._hs.config.run_background_tasks:
+ if self._hs.config.run_background_tasks or run_on_all_instances:
self._clock.looping_call(
run_as_background_process,
msec,
@@ -667,7 +695,10 @@ class ModuleApi:
A list containing the loaded templates, with the orders matching the one of
the filenames parameter.
"""
- return self._hs.config.read_templates(filenames, custom_template_directory)
+ return self._hs.config.read_templates(
+ filenames,
+ (td for td in (self.custom_template_dir, custom_template_directory) if td),
+ )
class PublicRoomListManager:
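The generator expression passed to `read_templates` above simply drops unset directories while preserving priority order (the module's own directory first, then the per-call one). Equivalently, with illustrative values:

    custom_template_dir = "/etc/synapse/module_templates"  # may be None
    custom_template_directory = None                       # may be None

    dirs = [td for td in (custom_template_dir, custom_template_directory) if td]
    print(dirs)  # ['/etc/synapse/module_templates']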
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 941fb238b7..b0834720ad 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -258,7 +258,7 @@ class Mailer:
# actually sort our so-called rooms_in_order list, most recent room first
rooms_in_order.sort(key=lambda r: -(notifs_by_room[r][-1]["received_ts"] or 0))
- rooms = []
+ rooms: List[Dict[str, Any]] = []
for r in rooms_in_order:
roomvars = await self._get_room_vars(
@@ -362,6 +362,7 @@ class Mailer:
"notifs": [],
"invite": is_invite,
"link": self._make_room_link(room_id),
+ "avatar_url": await self._get_room_avatar(room_state_ids),
}
if not is_invite:
@@ -393,6 +394,27 @@ class Mailer:
return room_vars
+ async def _get_room_avatar(
+ self,
+ room_state_ids: StateMap[str],
+ ) -> Optional[str]:
+ """
+ Retrieve the avatar url for this room, if it exists.
+
+ Args:
+ room_state_ids: The event IDs of the current room state.
+
+ Returns:
+ The room's avatar url if it is present and a string; otherwise None.
+ """
+ event_id = room_state_ids.get((EventTypes.RoomAvatar, ""))
+ if event_id:
+ ev = await self.store.get_event(event_id)
+ url = ev.content.get("url")
+ if isinstance(url, str):
+ return url
+ return None
+
async def _get_notif_vars(
self,
notif: Dict[str, Any],
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index cdcbdd772b..154e5b7028 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -48,7 +48,8 @@ logger = logging.getLogger(__name__)
# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.
REQUIREMENTS = [
- "jsonschema>=2.5.1",
+ # we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
+ "jsonschema>=3.0.0",
"frozendict>=1",
"unpaddedbase64>=1.1.0",
"canonicaljson>=1.4.0",
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index 79cadb7b57..a0b3145f4e 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -62,7 +62,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
self.store = hs.get_datastore()
self.storage = hs.get_storage()
self.clock = hs.get_clock()
- self.federation_handler = hs.get_federation_handler()
+ self.federation_event_handler = hs.get_federation_event_handler()
@staticmethod
async def _serialize_payload(store, room_id, event_and_contexts, backfilled):
@@ -127,7 +127,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
logger.info("Got %d events from federation", len(event_and_contexts))
- max_stream_id = await self.federation_handler.persist_events_and_notify(
+ max_stream_id = await self.federation_event_handler.persist_events_and_notify(
room_id, event_and_contexts, backfilled
)
diff --git a/synapse/replication/slave/storage/room.py b/synapse/replication/slave/storage/room.py
deleted file mode 100644
index 8cc6de3f46..0000000000
--- a/synapse/replication/slave/storage/room.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from synapse.replication.tcp.streams import PublicRoomsStream
-from synapse.storage.database import DatabasePool
-from synapse.storage.databases.main.room import RoomWorkerStore
-
-from ._base import BaseSlavedStore
-from ._slaved_id_tracker import SlavedIdTracker
-
-
-class RoomStore(RoomWorkerStore, BaseSlavedStore):
- def __init__(self, database: DatabasePool, db_conn, hs):
- super().__init__(database, db_conn, hs)
- self._public_room_id_gen = SlavedIdTracker(
- db_conn, "public_room_list_stream", "stream_id"
- )
-
- def get_current_public_room_stream_id(self):
- return self._public_room_id_gen.get_current_token()
-
- def process_replication_rows(self, stream_name, instance_name, token, rows):
- if stream_name == PublicRoomsStream.NAME:
- self._public_room_id_gen.advance(instance_name, token)
-
- return super().process_replication_rows(stream_name, instance_name, token, rows)
diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py
index 4c0023c68a..f41eabd85e 100644
--- a/synapse/replication/tcp/streams/__init__.py
+++ b/synapse/replication/tcp/streams/__init__.py
@@ -32,7 +32,6 @@ from synapse.replication.tcp.streams._base import (
GroupServerStream,
PresenceFederationStream,
PresenceStream,
- PublicRoomsStream,
PushersStream,
PushRulesStream,
ReceiptsStream,
@@ -57,7 +56,6 @@ STREAMS_MAP = {
PushRulesStream,
PushersStream,
CachesStream,
- PublicRoomsStream,
DeviceListsStream,
ToDeviceStream,
FederationStream,
@@ -79,7 +77,6 @@ __all__ = [
"PushRulesStream",
"PushersStream",
"CachesStream",
- "PublicRoomsStream",
"DeviceListsStream",
"ToDeviceStream",
"TagAccountDataStream",
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 3716c41bea..9b905aba9d 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -447,31 +447,6 @@ class CachesStream(Stream):
)
-class PublicRoomsStream(Stream):
- """The public rooms list changed"""
-
- PublicRoomsStreamRow = namedtuple(
- "PublicRoomsStreamRow",
- (
- "room_id", # str
- "visibility", # str
- "appservice_id", # str, optional
- "network_id", # str, optional
- ),
- )
-
- NAME = "public_rooms"
- ROW_TYPE = PublicRoomsStreamRow
-
- def __init__(self, hs):
- store = hs.get_datastore()
- super().__init__(
- hs.get_instance_name(),
- current_token_without_instance(store.get_current_public_room_stream_id),
- store.get_all_new_public_rooms,
- )
-
-
class DeviceListsStream(Stream):
"""Either a user has updated their devices or a remote server needs to be
told about a device update.
diff --git a/synapse/res/providers.json b/synapse/res/providers.json
new file mode 100644
index 0000000000..f1838f9559
--- /dev/null
+++ b/synapse/res/providers.json
@@ -0,0 +1,17 @@
+[
+ {
+ "provider_name": "Twitter",
+ "provider_url": "http://www.twitter.com/",
+ "endpoints": [
+ {
+ "schemes": [
+ "https://twitter.com/*/status/*",
+ "https://*.twitter.com/*/status/*",
+ "https://twitter.com/*/moments/*",
+ "https://*.twitter.com/*/moments/*"
+ ],
+ "url": "https://publish.twitter.com/oembed"
+ }
+ ]
+ }
+]
\ No newline at end of file
diff --git a/synapse/res/templates/recaptcha.html b/synapse/res/templates/recaptcha.html
index 63944dc608..b3db06ef97 100644
--- a/synapse/res/templates/recaptcha.html
+++ b/synapse/res/templates/recaptcha.html
@@ -16,6 +16,9 @@ function captchaDone() {
<body>
<form id="registrationForm" method="post" action="{{ myurl }}">
<div>
+ {% if error is defined %}
+ <p class="error"><strong>Error: {{ error }}</strong></p>
+ {% endif %}
<p>
Hello! We need to prevent computer programs and other automated
things from creating accounts on this server.
diff --git a/synapse/res/templates/registration_token.html b/synapse/res/templates/registration_token.html
new file mode 100644
index 0000000000..4577ce1702
--- /dev/null
+++ b/synapse/res/templates/registration_token.html
@@ -0,0 +1,23 @@
+<html>
+<head>
+<title>Authentication</title>
+<meta name='viewport' content='width=device-width, initial-scale=1,
+ user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
+</head>
+<body>
+<form id="registrationForm" method="post" action="{{ myurl }}">
+ <div>
+ {% if error is defined %}
+ <p class="error"><strong>Error: {{ error }}</strong></p>
+ {% endif %}
+ <p>
+ Please enter a registration token.
+ </p>
+ <input type="hidden" name="session" value="{{ session }}" />
+ <input type="text" name="token" />
+ <input type="submit" value="Authenticate" />
+ </div>
+</form>
+</body>
+</html>
diff --git a/synapse/res/templates/terms.html b/synapse/res/templates/terms.html
index dfef9897ee..369ff446d2 100644
--- a/synapse/res/templates/terms.html
+++ b/synapse/res/templates/terms.html
@@ -8,6 +8,9 @@
<body>
<form id="registrationForm" method="post" action="{{ myurl }}">
<div>
+ {% if error is defined %}
+ <p class="error"><strong>Error: {{ error }}</strong></p>
+ {% endif %}
<p>
Please click the button below if you agree to the
<a href="{{ terms_url }}">privacy policy of this homeserver.</a>
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index d29f2fea5e..3adc576124 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -14,39 +14,36 @@
# limitations under the License.
from synapse.http.server import JsonResource
from synapse.rest import admin
-from synapse.rest.client import versions
-from synapse.rest.client.v1 import (
- directory,
- events,
- initial_sync,
- login as v1_login,
- logout,
- presence,
- profile,
- push_rule,
- pusher,
- room,
- voip,
-)
-from synapse.rest.client.v2_alpha import (
+from synapse.rest.client import (
account,
account_data,
account_validity,
auth,
capabilities,
devices,
+ directory,
+ events,
filter,
groups,
+ initial_sync,
keys,
knock,
+ login as v1_login,
+ logout,
notifications,
openid,
password_policy,
+ presence,
+ profile,
+ push_rule,
+ pusher,
read_marker,
receipts,
register,
relations,
report_event,
+ room,
+ room_batch,
room_keys,
room_upgrade_rest_servlet,
sendtodevice,
@@ -56,6 +53,8 @@ from synapse.rest.client.v2_alpha import (
thirdparty,
tokenrefresh,
user_directory,
+ versions,
+ voip,
)
@@ -84,7 +83,6 @@ class ClientRestResource(JsonResource):
# Partially deprecated in r0
events.register_servlets(hs, client_resource)
- # "v1" + "r0"
room.register_servlets(hs, client_resource)
v1_login.register_servlets(hs, client_resource)
profile.register_servlets(hs, client_resource)
@@ -94,8 +92,6 @@ class ClientRestResource(JsonResource):
pusher.register_servlets(hs, client_resource)
push_rule.register_servlets(hs, client_resource)
logout.register_servlets(hs, client_resource)
-
- # "v2"
sync.register_servlets(hs, client_resource)
filter.register_servlets(hs, client_resource)
account.register_servlets(hs, client_resource)
@@ -117,6 +113,7 @@ class ClientRestResource(JsonResource):
user_directory.register_servlets(hs, client_resource)
groups.register_servlets(hs, client_resource)
room_upgrade_rest_servlet.register_servlets(hs, client_resource)
+ room_batch.register_servlets(hs, client_resource)
capabilities.register_servlets(hs, client_resource)
account_validity.register_servlets(hs, client_resource)
relations.register_servlets(hs, client_resource)
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index abf749b001..b2514d9d0d 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -36,7 +36,11 @@ from synapse.rest.admin.event_reports import (
)
from synapse.rest.admin.groups import DeleteGroupAdminRestServlet
from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo
-from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet
+from synapse.rest.admin.registration_tokens import (
+ ListRegistrationTokensRestServlet,
+ NewRegistrationTokenRestServlet,
+ RegistrationTokenRestServlet,
+)
from synapse.rest.admin.rooms import (
DeleteRoomRestServlet,
ForwardExtremitiesRestServlet,
@@ -47,10 +51,10 @@ from synapse.rest.admin.rooms import (
RoomMembersRestServlet,
RoomRestServlet,
RoomStateRestServlet,
- ShutdownRoomRestServlet,
)
from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet
+from synapse.rest.admin.username_available import UsernameAvailableRestServlet
from synapse.rest.admin.users import (
AccountValidityRenewServlet,
DeactivateAccountRestServlet,
@@ -60,7 +64,6 @@ from synapse.rest.admin.users import (
SearchUsersRestServlet,
ShadowBanRestServlet,
UserAdminServlet,
- UserMediaRestServlet,
UserMembershipRestServlet,
UserRegisterServlet,
UserRestServletV2,
@@ -220,11 +223,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RoomMembersRestServlet(hs).register(http_server)
DeleteRoomRestServlet(hs).register(http_server)
JoinRoomAliasServlet(hs).register(http_server)
- PurgeRoomServlet(hs).register(http_server)
- SendServerNoticeServlet(hs).register(http_server)
VersionServlet(hs).register(http_server)
UserAdminServlet(hs).register(http_server)
- UserMediaRestServlet(hs).register(http_server)
UserMembershipRestServlet(hs).register(http_server)
UserTokenRestServlet(hs).register(http_server)
UserRestServletV2(hs).register(http_server)
@@ -241,6 +241,14 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ForwardExtremitiesRestServlet(hs).register(http_server)
RoomEventContextServlet(hs).register(http_server)
RateLimitRestServlet(hs).register(http_server)
+ UsernameAvailableRestServlet(hs).register(http_server)
+ ListRegistrationTokensRestServlet(hs).register(http_server)
+ NewRegistrationTokenRestServlet(hs).register(http_server)
+ RegistrationTokenRestServlet(hs).register(http_server)
+
+ # Some servlets only get registered for the main process.
+ if hs.config.worker_app is None:
+ SendServerNoticeServlet(hs).register(http_server)
def register_servlets_for_client_rest_resource(
@@ -253,7 +261,6 @@ def register_servlets_for_client_rest_resource(
PurgeHistoryRestServlet(hs).register(http_server)
ResetPasswordRestServlet(hs).register(http_server)
SearchUsersRestServlet(hs).register(http_server)
- ShutdownRoomRestServlet(hs).register(http_server)
UserRegisterServlet(hs).register(http_server)
DeleteGroupAdminRestServlet(hs).register(http_server)
AccountValidityRenewServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index 0a19a333d7..8ce443049e 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -18,14 +18,15 @@ from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.http.server import HttpServer
-from synapse.http.servlet import RestServlet, parse_boolean, parse_integer
+from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.rest.admin._base import (
admin_patterns,
assert_requester_is_admin,
assert_user_is_admin,
)
-from synapse.types import JsonDict
+from synapse.storage.databases.main.media_repository import MediaSortOrder
+from synapse.types import JsonDict, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -259,7 +260,9 @@ class DeleteMediaByID(RestServlet):
logging.info("Deleting local media by ID: %s", media_id)
- deleted_media, total = await self.media_repository.delete_local_media(media_id)
+ deleted_media, total = await self.media_repository.delete_local_media_ids(
+ [media_id]
+ )
return 200, {"deleted_media": deleted_media, "total": total}
@@ -312,6 +315,165 @@ class DeleteMediaByDateSize(RestServlet):
return 200, {"deleted_media": deleted_media, "total": total}
+class UserMediaRestServlet(RestServlet):
+ """
+ Gets information about all uploaded local media for a specific `user_id`.
+ With a DELETE request you can delete all this media.
+
+ Example:
+ http://localhost:8008/_synapse/admin/v1/users/@user:server/media
+
+ Args:
+ The parameters `from` and `limit` are used for pagination.
+ By default, a `limit` of 100 is used.
+ Returns:
+ A list of media and an integer representing the total number of
+ media items that exist for this user
+ """
+
+ PATTERNS = admin_patterns("/users/(?P<user_id>[^/]+)/media$")
+
+ def __init__(self, hs: "HomeServer"):
+ self.is_mine = hs.is_mine
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastore()
+ self.media_repository = hs.get_media_repository()
+
+ async def on_GET(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
+ # This will always be set by the time Twisted calls us.
+ assert request.args is not None
+
+ await assert_requester_is_admin(self.auth, request)
+
+ if not self.is_mine(UserID.from_string(user_id)):
+ raise SynapseError(400, "Can only look up local users")
+
+ user = await self.store.get_user_by_id(user_id)
+ if user is None:
+ raise NotFoundError("Unknown user")
+
+ start = parse_integer(request, "from", default=0)
+ limit = parse_integer(request, "limit", default=100)
+
+ if start < 0:
+ raise SynapseError(
+ 400,
+ "Query parameter from must be a string representing a positive integer.",
+ errcode=Codes.INVALID_PARAM,
+ )
+
+ if limit < 0:
+ raise SynapseError(
+ 400,
+ "Query parameter limit must be a string representing a positive integer.",
+ errcode=Codes.INVALID_PARAM,
+ )
+
+ # If neither `order_by` nor `dir` is set, set the default order
+ # so that the newest media is on top, for backward compatibility.
+ if b"order_by" not in request.args and b"dir" not in request.args:
+ order_by = MediaSortOrder.CREATED_TS.value
+ direction = "b"
+ else:
+ order_by = parse_string(
+ request,
+ "order_by",
+ default=MediaSortOrder.CREATED_TS.value,
+ allowed_values=(
+ MediaSortOrder.MEDIA_ID.value,
+ MediaSortOrder.UPLOAD_NAME.value,
+ MediaSortOrder.CREATED_TS.value,
+ MediaSortOrder.LAST_ACCESS_TS.value,
+ MediaSortOrder.MEDIA_LENGTH.value,
+ MediaSortOrder.MEDIA_TYPE.value,
+ MediaSortOrder.QUARANTINED_BY.value,
+ MediaSortOrder.SAFE_FROM_QUARANTINE.value,
+ ),
+ )
+ direction = parse_string(
+ request, "dir", default="f", allowed_values=("f", "b")
+ )
+
+ media, total = await self.store.get_local_media_by_user_paginate(
+ start, limit, user_id, order_by, direction
+ )
+
+ ret = {"media": media, "total": total}
+ if (start + limit) < total:
+ ret["next_token"] = start + len(media)
+
+ return 200, ret
+
+ async def on_DELETE(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
+ # This will always be set by the time Twisted calls us.
+ assert request.args is not None
+
+ await assert_requester_is_admin(self.auth, request)
+
+ if not self.is_mine(UserID.from_string(user_id)):
+ raise SynapseError(400, "Can only look up local users")
+
+ user = await self.store.get_user_by_id(user_id)
+ if user is None:
+ raise NotFoundError("Unknown user")
+
+ start = parse_integer(request, "from", default=0)
+ limit = parse_integer(request, "limit", default=100)
+
+ if start < 0:
+ raise SynapseError(
+ 400,
+ "Query parameter from must be a string representing a positive integer.",
+ errcode=Codes.INVALID_PARAM,
+ )
+
+ if limit < 0:
+ raise SynapseError(
+ 400,
+ "Query parameter limit must be a string representing a positive integer.",
+ errcode=Codes.INVALID_PARAM,
+ )
+
+ # If neither `order_by` nor `dir` is set, set the default order
+ # so that the newest media is on top, for backward compatibility.
+ if b"order_by" not in request.args and b"dir" not in request.args:
+ order_by = MediaSortOrder.CREATED_TS.value
+ direction = "b"
+ else:
+ order_by = parse_string(
+ request,
+ "order_by",
+ default=MediaSortOrder.CREATED_TS.value,
+ allowed_values=(
+ MediaSortOrder.MEDIA_ID.value,
+ MediaSortOrder.UPLOAD_NAME.value,
+ MediaSortOrder.CREATED_TS.value,
+ MediaSortOrder.LAST_ACCESS_TS.value,
+ MediaSortOrder.MEDIA_LENGTH.value,
+ MediaSortOrder.MEDIA_TYPE.value,
+ MediaSortOrder.QUARANTINED_BY.value,
+ MediaSortOrder.SAFE_FROM_QUARANTINE.value,
+ ),
+ )
+ direction = parse_string(
+ request, "dir", default="f", allowed_values=("f", "b")
+ )
+
+ media, _ = await self.store.get_local_media_by_user_paginate(
+ start, limit, user_id, order_by, direction
+ )
+
+ deleted_media, total = await self.media_repository.delete_local_media_ids(
+ ([row["media_id"] for row in media])
+ )
+
+ return 200, {"deleted_media": deleted_media, "total": total}
+
+
def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer) -> None:
"""
Media repo specific APIs.
@@ -326,3 +488,4 @@ def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer)
ListMediaInRoom(hs).register(http_server)
DeleteMediaByID(hs).register(http_server)
DeleteMediaByDateSize(hs).register(http_server)
+ UserMediaRestServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/registration_tokens.py b/synapse/rest/admin/registration_tokens.py
new file mode 100644
index 0000000000..5a1c929d85
--- /dev/null
+++ b/synapse/rest/admin/registration_tokens.py
@@ -0,0 +1,321 @@
+# Copyright 2021 Callum Brown
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import string
+from typing import TYPE_CHECKING, Tuple
+
+from synapse.api.errors import Codes, NotFoundError, SynapseError
+from synapse.http.servlet import (
+ RestServlet,
+ parse_boolean,
+ parse_json_object_from_request,
+)
+from synapse.http.site import SynapseRequest
+from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class ListRegistrationTokensRestServlet(RestServlet):
+ """List registration tokens.
+
+ To list all tokens:
+
+ GET /_synapse/admin/v1/registration_tokens
+
+ 200 OK
+
+ {
+ "registration_tokens": [
+ {
+ "token": "abcd",
+ "uses_allowed": 3,
+ "pending": 0,
+ "completed": 1,
+ "expiry_time": null
+ },
+ {
+ "token": "wxyz",
+ "uses_allowed": null,
+ "pending": 0,
+ "completed": 9,
+ "expiry_time": 1625394937000
+ }
+ ]
+ }
+
+ The optional query parameter `valid` can be used to filter the response.
+ If it is `true`, only valid tokens are returned. If it is `false`, only
+ tokens that have expired or have had all uses exhausted are returned.
+ If it is omitted, all tokens are returned regardless of validity.
+ """
+
+ PATTERNS = admin_patterns("/registration_tokens$")
+
+ def __init__(self, hs: "HomeServer"):
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastore()
+
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ await assert_requester_is_admin(self.auth, request)
+ valid = parse_boolean(request, "valid")
+ token_list = await self.store.get_registration_tokens(valid)
+ return 200, {"registration_tokens": token_list}
+
+
+class NewRegistrationTokenRestServlet(RestServlet):
+ """Create a new registration token.
+
+ For example, to create a token specifying some fields:
+
+ POST /_synapse/admin/v1/registration_tokens/new
+
+ {
+ "token": "defg",
+ "uses_allowed": 1
+ }
+
+ 200 OK
+
+ {
+ "token": "defg",
+ "uses_allowed": 1,
+ "pending": 0,
+ "completed": 0,
+ "expiry_time": null
+ }
+
+ Defaults are used for any fields not specified.
+ """
+
+ PATTERNS = admin_patterns("/registration_tokens/new$")
+
+ def __init__(self, hs: "HomeServer"):
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
+ # A string of all the characters allowed to be in a registration_token
+ self.allowed_chars = string.ascii_letters + string.digits + "-_"
+ self.allowed_chars_set = set(self.allowed_chars)
+
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ await assert_requester_is_admin(self.auth, request)
+ body = parse_json_object_from_request(request)
+
+ if "token" in body:
+ token = body["token"]
+ if not isinstance(token, str):
+ raise SynapseError(400, "token must be a string", Codes.INVALID_PARAM)
+ if not (0 < len(token) <= 64):
+ raise SynapseError(
+ 400,
+ "token must not be empty and must not be longer than 64 characters",
+ Codes.INVALID_PARAM,
+ )
+ if not set(token).issubset(self.allowed_chars_set):
+ raise SynapseError(
+ 400,
+ "token must consist only of characters matched by the regex [A-Za-z0-9-_]",
+ Codes.INVALID_PARAM,
+ )
+
+ else:
+ # Get length of token to generate (default is 16)
+ length = body.get("length", 16)
+ if not isinstance(length, int):
+ raise SynapseError(
+ 400, "length must be an integer", Codes.INVALID_PARAM
+ )
+ if not (0 < length <= 64):
+ raise SynapseError(
+ 400,
+ "length must be greater than zero and not greater than 64",
+ Codes.INVALID_PARAM,
+ )
+
+ # Generate token
+ token = await self.store.generate_registration_token(
+ length, self.allowed_chars
+ )
+
+ uses_allowed = body.get("uses_allowed", None)
+ if not (
+ uses_allowed is None
+ or (isinstance(uses_allowed, int) and uses_allowed >= 0)
+ ):
+ raise SynapseError(
+ 400,
+ "uses_allowed must be a non-negative integer or null",
+ Codes.INVALID_PARAM,
+ )
+
+ expiry_time = body.get("expiry_time", None)
+ if not isinstance(expiry_time, (int, type(None))):
+ raise SynapseError(
+ 400, "expiry_time must be an integer or null", Codes.INVALID_PARAM
+ )
+ if isinstance(expiry_time, int) and expiry_time < self.clock.time_msec():
+ raise SynapseError(
+ 400, "expiry_time must not be in the past", Codes.INVALID_PARAM
+ )
+
+ created = await self.store.create_registration_token(
+ token, uses_allowed, expiry_time
+ )
+ if not created:
+ raise SynapseError(
+ 400, f"Token already exists: {token}", Codes.INVALID_PARAM
+ )
+
+ resp = {
+ "token": token,
+ "uses_allowed": uses_allowed,
+ "pending": 0,
+ "completed": 0,
+ "expiry_time": expiry_time,
+ }
+ return 200, resp
+
+
+class RegistrationTokenRestServlet(RestServlet):
+ """Retrieve, update, or delete the given token.
+
+ For example,
+
+ to retrieve a token:
+
+ GET /_synapse/admin/v1/registration_tokens/abcd
+
+ 200 OK
+
+ {
+ "token": "abcd",
+ "uses_allowed": 3,
+ "pending": 0,
+ "completed": 1,
+ "expiry_time": null
+ }
+
+
+ to update a token:
+
+ PUT /_synapse/admin/v1/registration_tokens/defg
+
+ {
+ "uses_allowed": 5,
+ "expiry_time": 4781243146000
+ }
+
+ 200 OK
+
+ {
+ "token": "defg",
+ "uses_allowed": 5,
+ "pending": 0,
+ "completed": 0,
+ "expiry_time": 4781243146000
+ }
+
+
+ to delete a token:
+
+ DELETE /_synapse/admin/v1/registration_tokens/wxyz
+
+ 200 OK
+
+ {}
+ """
+
+ PATTERNS = admin_patterns("/registration_tokens/(?P<token>[^/]*)$")
+
+ def __init__(self, hs: "HomeServer"):
+ self.hs = hs
+ self.clock = hs.get_clock()
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastore()
+
+ async def on_GET(self, request: SynapseRequest, token: str) -> Tuple[int, JsonDict]:
+ """Retrieve a registration token."""
+ await assert_requester_is_admin(self.auth, request)
+ token_info = await self.store.get_one_registration_token(token)
+
+        # If no result, return a 404
+ if token_info is None:
+ raise NotFoundError(f"No such registration token: {token}")
+
+ return 200, token_info
+
+ async def on_PUT(self, request: SynapseRequest, token: str) -> Tuple[int, JsonDict]:
+ """Update a registration token."""
+ await assert_requester_is_admin(self.auth, request)
+ body = parse_json_object_from_request(request)
+ new_attributes = {}
+
+ # Only add uses_allowed to new_attributes if it is present and valid
+ if "uses_allowed" in body:
+ uses_allowed = body["uses_allowed"]
+ if not (
+ uses_allowed is None
+ or (isinstance(uses_allowed, int) and uses_allowed >= 0)
+ ):
+ raise SynapseError(
+ 400,
+ "uses_allowed must be a non-negative integer or null",
+ Codes.INVALID_PARAM,
+ )
+ new_attributes["uses_allowed"] = uses_allowed
+
+ if "expiry_time" in body:
+ expiry_time = body["expiry_time"]
+ if not isinstance(expiry_time, (int, type(None))):
+ raise SynapseError(
+ 400, "expiry_time must be an integer or null", Codes.INVALID_PARAM
+ )
+ if isinstance(expiry_time, int) and expiry_time < self.clock.time_msec():
+ raise SynapseError(
+ 400, "expiry_time must not be in the past", Codes.INVALID_PARAM
+ )
+ new_attributes["expiry_time"] = expiry_time
+
+ if len(new_attributes) == 0:
+ # Nothing to update, get token info to return
+ token_info = await self.store.get_one_registration_token(token)
+ else:
+ token_info = await self.store.update_registration_token(
+ token, new_attributes
+ )
+
+        # If no result, return a 404
+ if token_info is None:
+ raise NotFoundError(f"No such registration token: {token}")
+
+ return 200, token_info
+
+ async def on_DELETE(
+ self, request: SynapseRequest, token: str
+ ) -> Tuple[int, JsonDict]:
+ """Delete a registration token."""
+ await assert_requester_is_admin(self.auth, request)
+
+ if await self.store.delete_registration_token(token):
+ return 200, {}
+
+ raise NotFoundError(f"No such registration token: {token}")
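
A sketch of exercising this new admin API end to end, following the request/response shapes in the docstrings above (homeserver URL and admin token are placeholders; `requests` is assumed):

import requests

BASE = "http://localhost:8008/_synapse/admin/v1/registration_tokens"
HEADERS = {"Authorization": "Bearer <admin_access_token>"}

# Create a single-use token; unspecified fields take the documented defaults.
created = requests.post(f"{BASE}/new", headers=HEADERS, json={"uses_allowed": 1})
token = created.json()["token"]

# List only tokens that are still valid (not expired, uses remaining).
valid = requests.get(BASE, headers=HEADERS, params={"valid": "true"}).json()

# Raise the use limit, then delete the token.
requests.put(f"{BASE}/{token}", headers=HEADERS, json={"uses_allowed": 5})
requests.delete(f"{BASE}/{token}", headers=HEADERS)
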
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index 40ee33646c..ad83d4b54c 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -20,6 +20,7 @@ from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.api.filtering import Filter
from synapse.http.servlet import (
+ ResolveRoomIdMixin,
RestServlet,
assert_params_in_dict,
parse_integer,
@@ -33,7 +34,7 @@ from synapse.rest.admin._base import (
assert_user_is_admin,
)
from synapse.storage.databases.main.room import RoomSortOrder
-from synapse.types import JsonDict, RoomAlias, RoomID, UserID, create_requester
+from synapse.types import JsonDict, UserID, create_requester
from synapse.util import json_decoder
if TYPE_CHECKING:
@@ -45,83 +46,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class ResolveRoomIdMixin:
- def __init__(self, hs: "HomeServer"):
- self.room_member_handler = hs.get_room_member_handler()
-
- async def resolve_room_id(
- self, room_identifier: str, remote_room_hosts: Optional[List[str]] = None
- ) -> Tuple[str, Optional[List[str]]]:
- """
- Resolve a room identifier to a room ID, if necessary.
-
- This also performanes checks to ensure the room ID is of the proper form.
-
- Args:
- room_identifier: The room ID or alias.
- remote_room_hosts: The potential remote room hosts to use.
-
- Returns:
- The resolved room ID.
-
- Raises:
- SynapseError if the room ID is of the wrong form.
- """
- if RoomID.is_valid(room_identifier):
- resolved_room_id = room_identifier
- elif RoomAlias.is_valid(room_identifier):
- room_alias = RoomAlias.from_string(room_identifier)
- (
- room_id,
- remote_room_hosts,
- ) = await self.room_member_handler.lookup_room_alias(room_alias)
- resolved_room_id = room_id.to_string()
- else:
- raise SynapseError(
- 400, "%s was not legal room ID or room alias" % (room_identifier,)
- )
- if not resolved_room_id:
- raise SynapseError(
- 400, "Unknown room ID or room alias %s" % room_identifier
- )
- return resolved_room_id, remote_room_hosts
-
-
-class ShutdownRoomRestServlet(RestServlet):
- """Shuts down a room by removing all local users from the room and blocking
- all future invites and joins to the room. Any local aliases will be repointed
- to a new room created by `new_room_user_id` and kicked users will be auto
- joined to the new room.
- """
-
- PATTERNS = admin_patterns("/shutdown_room/(?P<room_id>[^/]+)")
-
- def __init__(self, hs: "HomeServer"):
- self.hs = hs
- self.auth = hs.get_auth()
- self.room_shutdown_handler = hs.get_room_shutdown_handler()
-
- async def on_POST(
- self, request: SynapseRequest, room_id: str
- ) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request)
- await assert_user_is_admin(self.auth, requester.user)
-
- content = parse_json_object_from_request(request)
- assert_params_in_dict(content, ["new_room_user_id"])
-
- ret = await self.room_shutdown_handler.shutdown_room(
- room_id=room_id,
- new_room_user_id=content["new_room_user_id"],
- new_room_name=content.get("room_name"),
- message=content.get("message"),
- requester_user_id=requester.user.to_string(),
- block=True,
- )
-
- return (200, ret)
-
-
class DeleteRoomRestServlet(RestServlet):
"""Delete a room from server.
diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py
index b5e4c474ef..f5a38c2670 100644
--- a/synapse/rest/admin/server_notice_servlet.py
+++ b/synapse/rest/admin/server_notice_servlet.py
@@ -11,10 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import TYPE_CHECKING, Optional, Tuple
+from typing import TYPE_CHECKING, Awaitable, Optional, Tuple
from synapse.api.constants import EventTypes
-from synapse.api.errors import SynapseError
+from synapse.api.errors import NotFoundError, SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
@@ -53,6 +53,8 @@ class SendServerNoticeServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = hs.get_auth()
+ self.server_notices_manager = hs.get_server_notices_manager()
+ self.admin_handler = hs.get_admin_handler()
self.txns = HttpTransactionCache(hs)
def register(self, json_resource: HttpServer):
@@ -79,24 +81,29 @@ class SendServerNoticeServlet(RestServlet):
# We grab the server notices manager here as its initialisation has a check for worker processes,
# but worker processes still need to initialise SendServerNoticeServlet (as it is part of the
# admin api).
- if not self.hs.get_server_notices_manager().is_enabled():
+ if not self.server_notices_manager.is_enabled():
raise SynapseError(400, "Server notices are not enabled on this server")
- user_id = body["user_id"]
- UserID.from_string(user_id)
- if not self.hs.is_mine_id(user_id):
+ target_user = UserID.from_string(body["user_id"])
+ if not self.hs.is_mine(target_user):
raise SynapseError(400, "Server notices can only be sent to local users")
- event = await self.hs.get_server_notices_manager().send_notice(
- user_id=body["user_id"],
+ if not await self.admin_handler.get_user(target_user):
+ raise NotFoundError("User not found")
+
+ event = await self.server_notices_manager.send_notice(
+ user_id=target_user.to_string(),
type=event_type,
state_key=state_key,
event_content=body["content"],
+ txn_id=txn_id,
)
return 200, {"event_id": event.event_id}
- def on_PUT(self, request: SynapseRequest, txn_id: str) -> Tuple[int, JsonDict]:
+ def on_PUT(
+ self, request: SynapseRequest, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
return self.txns.fetch_or_execute_request(
request, self.on_POST, request, txn_id
)
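
The `txn_id` plumbing above is what makes retries safe: `HttpTransactionCache` replays the cached response for a repeated transaction ID instead of running `on_POST` a second time. A sketch, with placeholder URL and token, assuming the documented PUT form of the send_server_notice endpoint:

import requests

url = "http://localhost:8008/_synapse/admin/v1/send_server_notice/txn-1"
headers = {"Authorization": "Bearer <admin_access_token>"}
body = {
    "user_id": "@user:server",
    "content": {"msgtype": "m.text", "body": "Maintenance at 22:00 UTC"},
}

first = requests.put(url, headers=headers, json=body).json()
retry = requests.put(url, headers=headers, json=body).json()
# Within the transaction cache window, the retry returns the same event
# rather than sending a second notice.
assert first["event_id"] == retry["event_id"]
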
diff --git a/synapse/rest/admin/purge_room_servlet.py b/synapse/rest/admin/username_available.py
index 2365ff7a0f..2bf1472967 100644
--- a/synapse/rest/admin/purge_room_servlet.py
+++ b/synapse/rest/admin/username_available.py
@@ -11,48 +11,41 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import logging
+from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple
-from synapse.http.servlet import (
- RestServlet,
- assert_params_in_dict,
- parse_json_object_from_request,
-)
+from synapse.http.servlet import RestServlet, parse_string
from synapse.http.site import SynapseRequest
-from synapse.rest.admin import assert_requester_is_admin
-from synapse.rest.admin._base import admin_patterns
+from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
+logger = logging.getLogger(__name__)
-class PurgeRoomServlet(RestServlet):
- """Servlet which will remove all trace of a room from the database
- POST /_synapse/admin/v1/purge_room
- {
- "room_id": "!room:id"
- }
+class UsernameAvailableRestServlet(RestServlet):
+ """An admin API to check if a given username is available, regardless of whether registration is enabled.
- returns:
-
- {}
+ Example:
+ GET /_synapse/admin/v1/username_available?username=foo
+ 200 OK
+ {
+ "available": true
+ }
"""
- PATTERNS = admin_patterns("/purge_room$")
+ PATTERNS = admin_patterns("/username_available")
def __init__(self, hs: "HomeServer"):
- self.hs = hs
self.auth = hs.get_auth()
- self.pagination_handler = hs.get_pagination_handler()
+ self.registration_handler = hs.get_registration_handler()
- async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
- body = parse_json_object_from_request(request)
- assert_params_in_dict(body, ("room_id",))
-
- await self.pagination_handler.purge_room(body["room_id"])
-
- return 200, {}
+ username = parse_string(request, "username", required=True)
+ await self.registration_handler.check_username(username)
+ return HTTPStatus.OK, {"available": True}
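
Note that `check_username` raises rather than returning a flag, so a taken or malformed name surfaces as an error response instead of `{"available": false}`. A sketch with placeholder URL and token (error codes are the usual registration ones, stated here as an assumption):

import requests

resp = requests.get(
    "http://localhost:8008/_synapse/admin/v1/username_available",
    headers={"Authorization": "Bearer <admin_access_token>"},
    params={"username": "foo"},
)
if resp.ok:
    print(resp.json())  # {"available": true}
else:
    # e.g. M_USER_IN_USE for a taken localpart, M_INVALID_USERNAME for a bad one
    print(resp.json()["errcode"])
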
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index eef76ab18a..c1a1ba645e 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -34,8 +34,7 @@ from synapse.rest.admin._base import (
assert_requester_is_admin,
assert_user_is_admin,
)
-from synapse.rest.client.v2_alpha._base import client_patterns
-from synapse.storage.databases.main.media_repository import MediaSortOrder
+from synapse.rest.client._base import client_patterns
from synapse.storage.databases.main.stats import UserSortOrder
from synapse.types import JsonDict, UserID
@@ -172,7 +171,7 @@ class UserRestServletV2(RestServlet):
target_user = UserID.from_string(user_id)
if not self.hs.is_mine(target_user):
- raise SynapseError(400, "Can only lookup local users")
+ raise SynapseError(400, "Can only look up local users")
ret = await self.admin_handler.get_user(target_user)
@@ -196,42 +195,115 @@ class UserRestServletV2(RestServlet):
user = await self.admin_handler.get_user(target_user)
user_id = target_user.to_string()
+ # check for required parameters for each threepid
+ threepids = body.get("threepids")
+ if threepids is not None:
+ for threepid in threepids:
+ assert_params_in_dict(threepid, ["medium", "address"])
+
+ # check for required parameters for each external_id
+ external_ids = body.get("external_ids")
+ if external_ids is not None:
+ for external_id in external_ids:
+ assert_params_in_dict(external_id, ["auth_provider", "external_id"])
+
+ user_type = body.get("user_type", None)
+ if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
+ raise SynapseError(400, "Invalid user type")
+
+ set_admin_to = body.get("admin", False)
+ if not isinstance(set_admin_to, bool):
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "Param 'admin' must be a boolean, if given",
+ Codes.BAD_JSON,
+ )
+
+ password = body.get("password", None)
+ if password is not None:
+ if not isinstance(password, str) or len(password) > 512:
+ raise SynapseError(400, "Invalid password")
+
+ deactivate = body.get("deactivated", False)
+ if not isinstance(deactivate, bool):
+ raise SynapseError(400, "'deactivated' parameter is not of type boolean")
+
+ # convert List[Dict[str, str]] into Set[Tuple[str, str]]
+ if external_ids is not None:
+ new_external_ids = {
+ (external_id["auth_provider"], external_id["external_id"])
+ for external_id in external_ids
+ }
+
+ # convert List[Dict[str, str]] into Set[Tuple[str, str]]
+ if threepids is not None:
+ new_threepids = {
+ (threepid["medium"], threepid["address"]) for threepid in threepids
+ }
+
if user: # modify user
if "displayname" in body:
await self.profile_handler.set_displayname(
target_user, requester, body["displayname"], True
)
- if "threepids" in body:
- # check for required parameters for each threepid
- for threepid in body["threepids"]:
- assert_params_in_dict(threepid, ["medium", "address"])
+ if threepids is not None:
+ # get changed threepids (added and removed)
+ # convert List[Dict[str, Any]] into Set[Tuple[str, str]]
+ cur_threepids = {
+ (threepid["medium"], threepid["address"])
+ for threepid in await self.store.user_get_threepids(user_id)
+ }
+ add_threepids = new_threepids - cur_threepids
+ del_threepids = cur_threepids - new_threepids
- # remove old threepids from user
- threepids = await self.store.user_get_threepids(user_id)
- for threepid in threepids:
+ # remove old threepids
+ for medium, address in del_threepids:
try:
await self.auth_handler.delete_threepid(
- user_id, threepid["medium"], threepid["address"], None
+ user_id, medium, address, None
)
except Exception:
logger.exception("Failed to remove threepids")
raise SynapseError(500, "Failed to remove threepids")
- # add new threepids to user
+ # add new threepids
current_time = self.hs.get_clock().time_msec()
- for threepid in body["threepids"]:
+ for medium, address in add_threepids:
await self.auth_handler.add_threepid(
- user_id, threepid["medium"], threepid["address"], current_time
+ user_id, medium, address, current_time
+ )
+
+ if external_ids is not None:
+ # get changed external_ids (added and removed)
+ cur_external_ids = set(
+ await self.store.get_external_ids_by_user(user_id)
+ )
+ add_external_ids = new_external_ids - cur_external_ids
+ del_external_ids = cur_external_ids - new_external_ids
+
+ # remove old external_ids
+ for auth_provider, external_id in del_external_ids:
+ await self.store.remove_user_external_id(
+ auth_provider,
+ external_id,
+ user_id,
)
- if "avatar_url" in body and type(body["avatar_url"]) == str:
+ # add new external_ids
+ for auth_provider, external_id in add_external_ids:
+ await self.store.record_user_external_id(
+ auth_provider,
+ external_id,
+ user_id,
+ )
+
+ if "avatar_url" in body and isinstance(body["avatar_url"], str):
await self.profile_handler.set_avatar_url(
target_user, requester, body["avatar_url"], True
)
if "admin" in body:
- set_admin_to = bool(body["admin"])
if set_admin_to != user["admin"]:
auth_user = requester.user
if target_user == auth_user and not set_admin_to:
@@ -239,29 +311,18 @@ class UserRestServletV2(RestServlet):
await self.store.set_server_admin(target_user, set_admin_to)
- if "password" in body:
- if not isinstance(body["password"], str) or len(body["password"]) > 512:
- raise SynapseError(400, "Invalid password")
- else:
- new_password = body["password"]
- logout_devices = True
-
- new_password_hash = await self.auth_handler.hash(new_password)
-
- await self.set_password_handler.set_password(
- target_user.to_string(),
- new_password_hash,
- logout_devices,
- requester,
- )
+ if password is not None:
+ logout_devices = True
+ new_password_hash = await self.auth_handler.hash(password)
+
+ await self.set_password_handler.set_password(
+ target_user.to_string(),
+ new_password_hash,
+ logout_devices,
+ requester,
+ )
if "deactivated" in body:
- deactivate = body["deactivated"]
- if not isinstance(deactivate, bool):
- raise SynapseError(
- 400, "'deactivated' parameter is not of type boolean"
- )
-
if deactivate and not user["deactivated"]:
await self.deactivate_account_handler.deactivate_account(
target_user.to_string(), False, requester, by_admin=True
@@ -285,38 +346,26 @@ class UserRestServletV2(RestServlet):
return 200, user
else: # create user
- password = body.get("password")
+ displayname = body.get("displayname", None)
+
password_hash = None
if password is not None:
- if not isinstance(password, str) or len(password) > 512:
- raise SynapseError(400, "Invalid password")
password_hash = await self.auth_handler.hash(password)
- admin = body.get("admin", None)
- user_type = body.get("user_type", None)
- displayname = body.get("displayname", None)
-
- if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
- raise SynapseError(400, "Invalid user type")
-
user_id = await self.registration_handler.register_user(
localpart=target_user.localpart,
password_hash=password_hash,
- admin=bool(admin),
+ admin=set_admin_to,
default_display_name=displayname,
user_type=user_type,
by_admin=True,
)
- if "threepids" in body:
- # check for required parameters for each threepid
- for threepid in body["threepids"]:
- assert_params_in_dict(threepid, ["medium", "address"])
-
+ if threepids is not None:
current_time = self.hs.get_clock().time_msec()
- for threepid in body["threepids"]:
+ for medium, address in new_threepids:
await self.auth_handler.add_threepid(
- user_id, threepid["medium"], threepid["address"], current_time
+ user_id, medium, address, current_time
)
if (
self.hs.config.email_enable_notifs
@@ -328,12 +377,20 @@ class UserRestServletV2(RestServlet):
kind="email",
app_id="m.email",
app_display_name="Email Notifications",
- device_display_name=threepid["address"],
- pushkey=threepid["address"],
+ device_display_name=address,
+ pushkey=address,
lang=None, # We don't know a user's language here
data={},
)
+ if external_ids is not None:
+ for auth_provider, external_id in new_external_ids:
+ await self.store.record_user_external_id(
+ auth_provider,
+ external_id,
+ user_id,
+ )
+
if "avatar_url" in body and isinstance(body["avatar_url"], str):
await self.profile_handler.set_avatar_url(
target_user, requester, body["avatar_url"], True
@@ -461,7 +518,7 @@ class UserRegisterServlet(RestServlet):
raise SynapseError(403, "HMAC incorrect")
# Reuse the parts of RegisterRestServlet to reduce code duplication
- from synapse.rest.client.v2_alpha.register import RegisterRestServlet
+ from synapse.rest.client.register import RegisterRestServlet
register = RegisterRestServlet(self.hs)
@@ -796,7 +853,7 @@ class PushersRestServlet(RestServlet):
await assert_requester_is_admin(self.auth, request)
if not self.is_mine(UserID.from_string(user_id)):
- raise SynapseError(400, "Can only lookup local users")
+ raise SynapseError(400, "Can only look up local users")
if not await self.store.get_user_by_id(user_id):
raise NotFoundError("User not found")
@@ -808,97 +865,6 @@ class PushersRestServlet(RestServlet):
return 200, {"pushers": filtered_pushers, "total": len(filtered_pushers)}
-class UserMediaRestServlet(RestServlet):
- """
- Gets information about all uploaded local media for a specific `user_id`.
-
- Example:
- http://localhost:8008/_synapse/admin/v1/users/
- @user:server/media
-
- Args:
- The parameters `from` and `limit` are required for pagination.
- By default, a `limit` of 100 is used.
- Returns:
- A list of media and an integer representing the total number of
- media that exist given for this user
- """
-
- PATTERNS = admin_patterns("/users/(?P<user_id>[^/]+)/media$")
-
- def __init__(self, hs: "HomeServer"):
- self.is_mine = hs.is_mine
- self.auth = hs.get_auth()
- self.store = hs.get_datastore()
-
- async def on_GET(
- self, request: SynapseRequest, user_id: str
- ) -> Tuple[int, JsonDict]:
- # This will always be set by the time Twisted calls us.
- assert request.args is not None
-
- await assert_requester_is_admin(self.auth, request)
-
- if not self.is_mine(UserID.from_string(user_id)):
- raise SynapseError(400, "Can only lookup local users")
-
- user = await self.store.get_user_by_id(user_id)
- if user is None:
- raise NotFoundError("Unknown user")
-
- start = parse_integer(request, "from", default=0)
- limit = parse_integer(request, "limit", default=100)
-
- if start < 0:
- raise SynapseError(
- 400,
- "Query parameter from must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
-
- if limit < 0:
- raise SynapseError(
- 400,
- "Query parameter limit must be a string representing a positive integer.",
- errcode=Codes.INVALID_PARAM,
- )
-
- # If neither `order_by` nor `dir` is set, set the default order
- # to newest media is on top for backward compatibility.
- if b"order_by" not in request.args and b"dir" not in request.args:
- order_by = MediaSortOrder.CREATED_TS.value
- direction = "b"
- else:
- order_by = parse_string(
- request,
- "order_by",
- default=MediaSortOrder.CREATED_TS.value,
- allowed_values=(
- MediaSortOrder.MEDIA_ID.value,
- MediaSortOrder.UPLOAD_NAME.value,
- MediaSortOrder.CREATED_TS.value,
- MediaSortOrder.LAST_ACCESS_TS.value,
- MediaSortOrder.MEDIA_LENGTH.value,
- MediaSortOrder.MEDIA_TYPE.value,
- MediaSortOrder.QUARANTINED_BY.value,
- MediaSortOrder.SAFE_FROM_QUARANTINE.value,
- ),
- )
- direction = parse_string(
- request, "dir", default="f", allowed_values=("f", "b")
- )
-
- media, total = await self.store.get_local_media_by_user_paginate(
- start, limit, user_id, order_by, direction
- )
-
- ret = {"media": media, "total": total}
- if (start + limit) < total:
- ret["next_token"] = start + len(media)
-
- return 200, ret
-
-
class UserTokenRestServlet(RestServlet):
"""An admin API for logging in as a user.
@@ -1017,7 +983,7 @@ class RateLimitRestServlet(RestServlet):
await assert_requester_is_admin(self.auth, request)
if not self.hs.is_mine_id(user_id):
- raise SynapseError(400, "Can only lookup local users")
+ raise SynapseError(400, "Can only look up local users")
if not await self.store.get_user_by_id(user_id):
raise NotFoundError("User not found")
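
The threepid and external_id handling above follows one pattern: normalise both the requested and the stored bindings to sets of tuples, then derive additions and removals by set difference. A self-contained toy of that diffing step (values are illustrative only):

current = {("email", "old@example.com"), ("msisdn", "447700900000")}
desired = {("email", "new@example.com"), ("msisdn", "447700900000")}

to_add = desired - current     # {("email", "new@example.com")}
to_remove = current - desired  # {("email", "old@example.com")}
# Entries present in both sets are left untouched.
assert ("msisdn", "447700900000") not in to_add | to_remove
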
diff --git a/synapse/rest/client/__init__.py b/synapse/rest/client/__init__.py
index 629e2df74a..f9830cc51f 100644
--- a/synapse/rest/client/__init__.py
+++ b/synapse/rest/client/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2014-2016 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/_base.py
index 0443f4571c..a0971ce994 100644
--- a/synapse/rest/client/v2_alpha/_base.py
+++ b/synapse/rest/client/_base.py
@@ -16,7 +16,7 @@
"""
import logging
import re
-from typing import Iterable, Pattern
+from typing import Any, Awaitable, Callable, Iterable, Pattern, Tuple, TypeVar, cast
from synapse.api.errors import InteractiveAuthIncompleteError
from synapse.api.urls import CLIENT_API_PREFIX
@@ -76,7 +76,10 @@ def set_timeline_upper_limit(filter_json: JsonDict, filter_timeline_limit: int)
)
-def interactive_auth_handler(orig):
+C = TypeVar("C", bound=Callable[..., Awaitable[Tuple[int, JsonDict]]])
+
+
+def interactive_auth_handler(orig: C) -> C:
"""Wraps an on_POST method to handle InteractiveAuthIncompleteErrors
Takes a on_POST method which returns an Awaitable (errcode, body) response
@@ -91,10 +94,10 @@ def interactive_auth_handler(orig):
await self.auth_handler.check_auth
"""
- async def wrapped(*args, **kwargs):
+ async def wrapped(*args: Any, **kwargs: Any) -> Tuple[int, JsonDict]:
try:
return await orig(*args, **kwargs)
except InteractiveAuthIncompleteError as e:
return 401, e.result
- return wrapped
+ return cast(C, wrapped)
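
The decorator typing above relies on a `TypeVar` bound to the handler signature, so callers keep the original callable's type; the `cast` is needed because the wrapper is a new function object that mypy sees as a plain async function. A minimal standalone sketch of the same pattern:

from typing import Any, Awaitable, Callable, Dict, Tuple, TypeVar, cast

H = TypeVar("H", bound=Callable[..., Awaitable[Tuple[int, Dict[str, Any]]]])

def passthrough(orig: H) -> H:
    async def wrapped(*args: Any, **kwargs: Any) -> Tuple[int, Dict[str, Any]]:
        return await orig(*args, **kwargs)
    # cast restores the decorated handler's precise type
    return cast(H, wrapped)
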
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/account.py
index fb5ad2906e..aefaaa8ae8 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/account.py
@@ -16,9 +16,11 @@
import logging
import random
from http import HTTPStatus
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional, Tuple
from urllib.parse import urlparse
+from twisted.web.server import Request
+
from synapse.api.constants import LoginType
from synapse.api.errors import (
Codes,
@@ -28,15 +30,17 @@ from synapse.api.errors import (
)
from synapse.config.emailconfig import ThreepidBehaviour
from synapse.handlers.ui_auth import UIAuthSessionDataConstants
-from synapse.http.server import finish_request, respond_with_html
+from synapse.http.server import HttpServer, finish_request, respond_with_html
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
+from synapse.types import JsonDict
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import assert_valid_client_secret, random_string
from synapse.util.threepids import check_3pid_allowed, validate_email
@@ -68,7 +72,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
template_text=self.config.email_password_reset_template_text,
)
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
@@ -159,7 +163,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
class PasswordRestServlet(RestServlet):
PATTERNS = client_patterns("/account/password$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
@@ -169,7 +173,7 @@ class PasswordRestServlet(RestServlet):
self._set_password_handler = hs.get_set_password_handler()
@interactive_auth_handler
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
# we do basic sanity checks here because the auth layer will store these
@@ -190,6 +194,7 @@ class PasswordRestServlet(RestServlet):
#
# In the second case, we require a password to confirm their identity.
+ requester = None
if self.auth.has_access_token(request):
requester = await self.auth.get_user_by_req(request)
try:
@@ -206,16 +211,15 @@ class PasswordRestServlet(RestServlet):
# If a password is available now, hash the provided password and
# store it for later.
if new_password:
- password_hash = await self.auth_handler.hash(new_password)
+ new_password_hash = await self.auth_handler.hash(new_password)
await self.auth_handler.set_session_data(
e.session_id,
UIAuthSessionDataConstants.PASSWORD_HASH,
- password_hash,
+ new_password_hash,
)
raise
user_id = requester.user.to_string()
else:
- requester = None
try:
result, params, session_id = await self.auth_handler.check_ui_auth(
[[LoginType.EMAIL_IDENTITY]],
@@ -230,11 +234,11 @@ class PasswordRestServlet(RestServlet):
# If a password is available now, hash the provided password and
# store it for later.
if new_password:
- password_hash = await self.auth_handler.hash(new_password)
+ new_password_hash = await self.auth_handler.hash(new_password)
await self.auth_handler.set_session_data(
e.session_id,
UIAuthSessionDataConstants.PASSWORD_HASH,
- password_hash,
+ new_password_hash,
)
raise
@@ -264,7 +268,7 @@ class PasswordRestServlet(RestServlet):
# If we have a password in this request, prefer it. Otherwise, use the
# password hash from an earlier request.
if new_password:
- password_hash = await self.auth_handler.hash(new_password)
+ password_hash: Optional[str] = await self.auth_handler.hash(new_password)
elif session_id is not None:
password_hash = await self.auth_handler.get_session_data(
session_id, UIAuthSessionDataConstants.PASSWORD_HASH, None
@@ -288,7 +292,7 @@ class PasswordRestServlet(RestServlet):
class DeactivateAccountRestServlet(RestServlet):
PATTERNS = client_patterns("/account/deactivate$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
@@ -296,7 +300,7 @@ class DeactivateAccountRestServlet(RestServlet):
self._deactivate_account_handler = hs.get_deactivate_account_handler()
@interactive_auth_handler
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
erase = body.get("erase", False)
if not isinstance(erase, bool):
@@ -338,7 +342,7 @@ class DeactivateAccountRestServlet(RestServlet):
class EmailThreepidRequestTokenRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/email/requestToken$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.config = hs.config
@@ -353,7 +357,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
template_text=self.config.email_add_threepid_template_text,
)
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
@@ -449,7 +453,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
self.store = self.hs.get_datastore()
self.identity_handler = hs.get_identity_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
assert_params_in_dict(
body, ["client_secret", "country", "phone_number", "send_attempt"]
@@ -525,11 +529,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
"/add_threepid/email/submit_token$", releases=(), unstable=True
)
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.config = hs.config
self.clock = hs.get_clock()
@@ -539,7 +539,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
self.config.email_add_threepid_template_failure_html
)
- async def on_GET(self, request):
+ async def on_GET(self, request: Request) -> None:
if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
@@ -596,18 +596,14 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
"/add_threepid/msisdn/submit_token$", releases=(), unstable=True
)
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.config = hs.config
self.clock = hs.get_clock()
self.store = hs.get_datastore()
self.identity_handler = hs.get_identity_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: Request) -> Tuple[int, JsonDict]:
if not self.config.account_threepid_delegate_msisdn:
raise SynapseError(
400,
@@ -632,7 +628,7 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
class ThreepidRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
@@ -640,14 +636,14 @@ class ThreepidRestServlet(RestServlet):
self.auth_handler = hs.get_auth_handler()
self.datastore = self.hs.get_datastore()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
threepids = await self.datastore.user_get_threepids(requester.user.to_string())
return 200, {"threepids": threepids}
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
@@ -688,7 +684,7 @@ class ThreepidRestServlet(RestServlet):
class ThreepidAddRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/add$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
@@ -696,7 +692,7 @@ class ThreepidAddRestServlet(RestServlet):
self.auth_handler = hs.get_auth_handler()
@interactive_auth_handler
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
@@ -738,13 +734,13 @@ class ThreepidAddRestServlet(RestServlet):
class ThreepidBindRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/bind$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
self.auth = hs.get_auth()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["id_server", "sid", "client_secret"])
@@ -767,14 +763,14 @@ class ThreepidBindRestServlet(RestServlet):
class ThreepidUnbindRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/unbind$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
self.auth = hs.get_auth()
self.datastore = self.hs.get_datastore()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
"""Unbind the given 3pid from a specific identity server, or identity servers that are
known to have this 3pid bound
"""
@@ -798,13 +794,13 @@ class ThreepidUnbindRestServlet(RestServlet):
class ThreepidDeleteRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/delete$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
@@ -835,7 +831,7 @@ class ThreepidDeleteRestServlet(RestServlet):
return 200, {"id_server_unbind_result": id_server_unbind_result}
-def assert_valid_next_link(hs: "HomeServer", next_link: str):
+def assert_valid_next_link(hs: "HomeServer", next_link: str) -> None:
"""
Raises a SynapseError if a given next_link value is invalid
@@ -877,11 +873,11 @@ def assert_valid_next_link(hs: "HomeServer", next_link: str):
class WhoamiRestServlet(RestServlet):
PATTERNS = client_patterns("/account/whoami$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
response = {"user_id": requester.user.to_string()}
@@ -894,7 +890,7 @@ class WhoamiRestServlet(RestServlet):
return 200, response
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
EmailPasswordRequestTokenRestServlet(hs).register(http_server)
PasswordRestServlet(hs).register(http_server)
DeactivateAccountRestServlet(hs).register(http_server)
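
The `new_password_hash` rename above is part of a stash-and-resume pattern: when UI auth is incomplete, the hash of the supplied password is parked in the session so a retried request that omits the password can still complete. A toy of that flow (a dict stands in for the UIA session store; this is not Synapse code):

from typing import Dict, Optional

sessions: Dict[str, Dict[str, str]] = {}

def stash(session_id: str, password_hash: str) -> None:
    # UI auth incomplete: park the hash before re-raising the auth error
    sessions.setdefault(session_id, {})["password_hash"] = password_hash

def resume(session_id: str, password_hash: Optional[str]) -> str:
    # prefer a hash from the current request, else fall back to the stash
    if password_hash is not None:
        return password_hash
    return sessions[session_id]["password_hash"]

stash("s1", "hash-of-new-password")
assert resume("s1", None) == "hash-of-new-password"
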
diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/account_data.py
index 7517e9304e..d1badbdf3b 100644
--- a/synapse/rest/client/v2_alpha/account_data.py
+++ b/synapse/rest/client/account_data.py
@@ -13,12 +13,19 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import AuthError, NotFoundError, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -32,13 +39,15 @@ class AccountDataServlet(RestServlet):
"/user/(?P<user_id>[^/]*)/account_data/(?P<account_data_type>[^/]*)"
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.handler = hs.get_account_data_handler()
- async def on_PUT(self, request, user_id, account_data_type):
+ async def on_PUT(
+ self, request: SynapseRequest, user_id: str, account_data_type: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot add account data for other users.")
@@ -49,7 +58,9 @@ class AccountDataServlet(RestServlet):
return 200, {}
- async def on_GET(self, request, user_id, account_data_type):
+ async def on_GET(
+ self, request: SynapseRequest, user_id: str, account_data_type: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot get account data for other users.")
@@ -76,13 +87,19 @@ class RoomAccountDataServlet(RestServlet):
"/account_data/(?P<account_data_type>[^/]*)"
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.handler = hs.get_account_data_handler()
- async def on_PUT(self, request, user_id, room_id, account_data_type):
+ async def on_PUT(
+ self,
+ request: SynapseRequest,
+ user_id: str,
+ room_id: str,
+ account_data_type: str,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot add account data for other users.")
@@ -102,7 +119,13 @@ class RoomAccountDataServlet(RestServlet):
return 200, {}
- async def on_GET(self, request, user_id, room_id, account_data_type):
+ async def on_GET(
+ self,
+ request: SynapseRequest,
+ user_id: str,
+ room_id: str,
+ account_data_type: str,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot get account data for other users.")
@@ -117,6 +140,6 @@ class RoomAccountDataServlet(RestServlet):
return 200, event
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
AccountDataServlet(hs).register(http_server)
RoomAccountDataServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/account_validity.py
index 3ebe401861..6c24b96c54 100644
--- a/synapse/rest/client/v2_alpha/account_validity.py
+++ b/synapse/rest/client/account_validity.py
@@ -13,24 +13,27 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
-from synapse.api.errors import SynapseError
-from synapse.http.server import respond_with_html
-from synapse.http.servlet import RestServlet
+from twisted.web.server import Request
+
+from synapse.http.server import HttpServer, respond_with_html
+from synapse.http.servlet import RestServlet, parse_string
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
class AccountValidityRenewServlet(RestServlet):
PATTERNS = client_patterns("/account_validity/renew$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
@@ -46,18 +49,14 @@ class AccountValidityRenewServlet(RestServlet):
hs.config.account_validity.account_validity_invalid_token_template
)
- async def on_GET(self, request):
- if b"token" not in request.args:
- raise SynapseError(400, "Missing renewal token")
- renewal_token = request.args[b"token"][0]
+ async def on_GET(self, request: Request) -> None:
+ renewal_token = parse_string(request, "token", required=True)
(
token_valid,
token_stale,
expiration_ts,
- ) = await self.account_activity_handler.renew_account(
- renewal_token.decode("utf8")
- )
+ ) = await self.account_activity_handler.renew_account(renewal_token)
if token_valid:
status_code = 200
@@ -77,11 +76,7 @@ class AccountValidityRenewServlet(RestServlet):
class AccountValiditySendMailServlet(RestServlet):
PATTERNS = client_patterns("/account_validity/send_mail$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
@@ -91,7 +86,7 @@ class AccountValiditySendMailServlet(RestServlet):
hs.config.account_validity.account_validity_renew_by_email_enabled
)
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_expired=True)
user_id = requester.user.to_string()
await self.account_activity_handler.send_renewal_email_to_user(user_id)
@@ -99,6 +94,6 @@ class AccountValiditySendMailServlet(RestServlet):
return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
AccountValidityRenewServlet(hs).register(http_server)
AccountValiditySendMailServlet(hs).register(http_server)
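
The `parse_string(..., required=True)` change above replaces hand-rolled `request.args` handling: the helper decodes the query value and turns a missing parameter into a structured 400 (`M_MISSING_PARAM`). A hypothetical servlet sketch of the idiom (not part of this patch):

from typing import Tuple

from synapse.http.servlet import RestServlet, parse_string
from synapse.types import JsonDict

class EchoTokenServlet(RestServlet):  # hypothetical, for illustration only
    async def on_GET(self, request) -> Tuple[int, JsonDict]:
        # Returns a decoded str; raises a SynapseError with M_MISSING_PARAM
        # when ?token= is absent.
        token = parse_string(request, "token", required=True)
        return 200, {"token": token}
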
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/auth.py
index 6ea1b50a62..df8cc4ac7a 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/auth.py
@@ -15,11 +15,14 @@
import logging
from typing import TYPE_CHECKING
+from twisted.web.server import Request
+
from synapse.api.constants import LoginType
-from synapse.api.errors import SynapseError
+from synapse.api.errors import LoginError, SynapseError
from synapse.api.urls import CLIENT_API_PREFIX
-from synapse.http.server import respond_with_html
+from synapse.http.server import HttpServer, respond_with_html
from synapse.http.servlet import RestServlet, parse_string
+from synapse.http.site import SynapseRequest
from ._base import client_patterns
@@ -46,9 +49,10 @@ class AuthRestServlet(RestServlet):
self.registration_handler = hs.get_registration_handler()
self.recaptcha_template = hs.config.recaptcha_template
self.terms_template = hs.config.terms_template
+ self.registration_token_template = hs.config.registration_token_template
self.success_template = hs.config.fallback_success_template
- async def on_GET(self, request, stagetype):
+ async def on_GET(self, request: SynapseRequest, stagetype: str) -> None:
session = parse_string(request, "session")
if not session:
raise SynapseError(400, "No session supplied")
@@ -74,6 +78,12 @@ class AuthRestServlet(RestServlet):
# re-authenticate with their SSO provider.
html = await self.auth_handler.start_sso_ui_auth(request, session)
+ elif stagetype == LoginType.REGISTRATION_TOKEN:
+ html = self.registration_token_template.render(
+ session=session,
+ myurl=f"{CLIENT_API_PREFIX}/r0/auth/{LoginType.REGISTRATION_TOKEN}/fallback/web",
+ )
+
else:
raise SynapseError(404, "Unknown auth stage type")
@@ -81,7 +91,7 @@ class AuthRestServlet(RestServlet):
respond_with_html(request, 200, html)
return None
- async def on_POST(self, request, stagetype):
+ async def on_POST(self, request: Request, stagetype: str) -> None:
session = parse_string(request, "session")
if not session:
@@ -95,29 +105,32 @@ class AuthRestServlet(RestServlet):
authdict = {"response": response, "session": session}
- success = await self.auth_handler.add_oob_auth(
- LoginType.RECAPTCHA, authdict, request.getClientIP()
- )
-
- if success:
- html = self.success_template.render()
- else:
+ try:
+ await self.auth_handler.add_oob_auth(
+ LoginType.RECAPTCHA, authdict, request.getClientIP()
+ )
+ except LoginError as e:
+ # Authentication failed, let user try again
html = self.recaptcha_template.render(
session=session,
myurl="%s/r0/auth/%s/fallback/web"
% (CLIENT_API_PREFIX, LoginType.RECAPTCHA),
sitekey=self.hs.config.recaptcha_public_key,
+ error=e.msg,
)
+ else:
+ # No LoginError was raised, so authentication was successful
+ html = self.success_template.render()
+
elif stagetype == LoginType.TERMS:
authdict = {"session": session}
- success = await self.auth_handler.add_oob_auth(
- LoginType.TERMS, authdict, request.getClientIP()
- )
-
- if success:
- html = self.success_template.render()
- else:
+ try:
+ await self.auth_handler.add_oob_auth(
+ LoginType.TERMS, authdict, request.getClientIP()
+ )
+ except LoginError as e:
+ # Authentication failed, let user try again
html = self.terms_template.render(
session=session,
terms_url="%s_matrix/consent?v=%s"
@@ -127,10 +140,33 @@ class AuthRestServlet(RestServlet):
),
myurl="%s/r0/auth/%s/fallback/web"
% (CLIENT_API_PREFIX, LoginType.TERMS),
+ error=e.msg,
)
+ else:
+ # No LoginError was raised, so authentication was successful
+ html = self.success_template.render()
+
elif stagetype == LoginType.SSO:
# The SSO fallback workflow should not post here,
raise SynapseError(404, "Fallback SSO auth does not support POST requests.")
+
+ elif stagetype == LoginType.REGISTRATION_TOKEN:
+ token = parse_string(request, "token", required=True)
+ authdict = {"session": session, "token": token}
+
+ try:
+ await self.auth_handler.add_oob_auth(
+ LoginType.REGISTRATION_TOKEN, authdict, request.getClientIP()
+ )
+ except LoginError as e:
+ html = self.registration_token_template.render(
+ session=session,
+ myurl=f"{CLIENT_API_PREFIX}/r0/auth/{LoginType.REGISTRATION_TOKEN}/fallback/web",
+ error=e.msg,
+ )
+ else:
+ html = self.success_template.render()
+
else:
raise SynapseError(404, "Unknown auth stage type")
@@ -139,5 +175,5 @@ class AuthRestServlet(RestServlet):
return None
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
AuthRestServlet(hs).register(http_server)
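
From the client's side, the new fallback stage slots into the normal UIA loop: after the web fallback succeeds (or directly, without it), registration is retried with an auth dict for the stage. A sketch with placeholder values, assuming the MSC3231 field names:

import requests

resp = requests.post(
    "http://localhost:8008/_matrix/client/r0/register",
    json={
        "username": "alice",
        "password": "<password>",
        "auth": {
            "type": "org.matrix.msc3231.login.registration_token",
            "token": "<registration_token>",
            "session": "<session_id_from_initial_401>",
        },
    },
)
print(resp.status_code, resp.json())
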
diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/capabilities.py
index 88e3aac797..65b3b5ce2c 100644
--- a/synapse/rest/client/v2_alpha/capabilities.py
+++ b/synapse/rest/client/capabilities.py
@@ -15,6 +15,7 @@ import logging
from typing import TYPE_CHECKING, Tuple
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, MSC3244_CAPABILITIES
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
from synapse.http.site import SynapseRequest
from synapse.types import JsonDict
@@ -61,8 +62,19 @@ class CapabilitiesRestServlet(RestServlet):
"org.matrix.msc3244.room_capabilities"
] = MSC3244_CAPABILITIES
+ if self.config.experimental.msc3283_enabled:
+ response["capabilities"]["org.matrix.msc3283.set_displayname"] = {
+ "enabled": self.config.enable_set_displayname
+ }
+ response["capabilities"]["org.matrix.msc3283.set_avatar_url"] = {
+ "enabled": self.config.enable_set_avatar_url
+ }
+ response["capabilities"]["org.matrix.msc3283.3pid_changes"] = {
+ "enabled": self.config.enable_3pid_changes
+ }
+
return 200, response
-def register_servlets(hs: "HomeServer", http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
CapabilitiesRestServlet(hs).register(http_server)
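
A sketch of reading these flags from a client (placeholder URL and token); each `enabled` value mirrors the corresponding server config option (`enable_set_displayname`, `enable_set_avatar_url`, `enable_3pid_changes`):

import requests

caps = requests.get(
    "http://localhost:8008/_matrix/client/r0/capabilities",
    headers={"Authorization": "Bearer <access_token>"},
).json()["capabilities"]

for key in (
    "org.matrix.msc3283.set_displayname",
    "org.matrix.msc3283.set_avatar_url",
    "org.matrix.msc3283.3pid_changes",
):
    print(key, caps.get(key))  # e.g. {"enabled": True}
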
diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/devices.py
index 8b9674db06..25bc3c8f47 100644
--- a/synapse/rest/client/v2_alpha/devices.py
+++ b/synapse/rest/client/devices.py
@@ -14,34 +14,36 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api import errors
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
)
from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns, interactive_auth_handler
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
class DevicesRestServlet(RestServlet):
PATTERNS = client_patterns("/devices$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
devices = await self.device_handler.get_devices_by_user(
requester.user.to_string()
@@ -57,7 +59,7 @@ class DeleteDevicesRestServlet(RestServlet):
PATTERNS = client_patterns("/delete_devices")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
@@ -65,7 +67,7 @@ class DeleteDevicesRestServlet(RestServlet):
self.auth_handler = hs.get_auth_handler()
@interactive_auth_handler
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
try:
@@ -100,18 +102,16 @@ class DeleteDevicesRestServlet(RestServlet):
class DeviceRestServlet(RestServlet):
PATTERNS = client_patterns("/devices/(?P<device_id>[^/]*)$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
self.auth_handler = hs.get_auth_handler()
- async def on_GET(self, request, device_id):
+ async def on_GET(
+ self, request: SynapseRequest, device_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
device = await self.device_handler.get_device(
requester.user.to_string(), device_id
@@ -119,7 +119,9 @@ class DeviceRestServlet(RestServlet):
return 200, device
@interactive_auth_handler
- async def on_DELETE(self, request, device_id):
+ async def on_DELETE(
+ self, request: SynapseRequest, device_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
try:
@@ -146,7 +148,9 @@ class DeviceRestServlet(RestServlet):
await self.device_handler.delete_device(requester.user.to_string(), device_id)
return 200, {}
- async def on_PUT(self, request, device_id):
+ async def on_PUT(
+ self, request: SynapseRequest, device_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
body = parse_json_object_from_request(request)
@@ -193,13 +197,13 @@ class DehydratedDeviceServlet(RestServlet):
PATTERNS = client_patterns("/org.matrix.msc2697.v2/dehydrated_device", releases=())
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
- async def on_GET(self, request: SynapseRequest):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
dehydrated_device = await self.device_handler.get_dehydrated_device(
requester.user.to_string()
@@ -211,7 +215,7 @@ class DehydratedDeviceServlet(RestServlet):
else:
raise errors.NotFoundError("No dehydrated device available")
- async def on_PUT(self, request: SynapseRequest):
+ async def on_PUT(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
submission = parse_json_object_from_request(request)
requester = await self.auth.get_user_by_req(request)
@@ -259,13 +263,13 @@ class ClaimDehydratedDeviceServlet(RestServlet):
"/org.matrix.msc2697.v2/dehydrated_device/claim", releases=()
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
- async def on_POST(self, request: SynapseRequest):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
submission = parse_json_object_from_request(request)
@@ -292,7 +296,7 @@ class ClaimDehydratedDeviceServlet(RestServlet):
return (200, result)
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
DeleteDevicesRestServlet(hs).register(http_server)
DevicesRestServlet(hs).register(http_server)
DeviceRestServlet(hs).register(http_server)
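For reference, the convention this patch rolls out across the client servlets, shown as a self-contained sketch. The `DemoRestServlet` below is hypothetical and `JsonDict` is a local stand-in for `synapse.types.JsonDict`; the point is the shape: handlers take a typed request, return a `(status, JSON body)` pair, and the `HomeServer` import sits behind `TYPE_CHECKING` so there is no runtime import cycle.

```python
# Minimal sketch of the typing convention applied in this patch
# (hypothetical servlet, not part of the diff).
import asyncio
from typing import TYPE_CHECKING, Any, Dict, Tuple

JsonDict = Dict[str, Any]  # assumption: mirrors synapse.types.JsonDict

if TYPE_CHECKING:
    from synapse.server import HomeServer  # only imported by the type checker


class DemoRestServlet:
    def __init__(self, hs: "HomeServer") -> None:
        self.hs = hs

    async def on_GET(self, request: Any) -> Tuple[int, JsonDict]:
        # In Synapse itself `request` would be a SynapseRequest.
        return 200, {"ok": True}


# None suffices at runtime for the demo, type annotations notwithstanding.
print(asyncio.run(DemoRestServlet(hs=None).on_GET(request=None)))  # (200, {'ok': True})
```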
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/directory.py
index ae92a3df8e..ee247e3d1e 100644
--- a/synapse/rest/client/v1/directory.py
+++ b/synapse/rest/client/directory.py
@@ -12,8 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import logging
+from typing import TYPE_CHECKING, Tuple
+
+from twisted.web.server import Request
from synapse.api.errors import (
AuthError,
@@ -22,14 +24,19 @@ from synapse.api.errors import (
NotFoundError,
SynapseError,
)
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.rest.client.v2_alpha._base import client_patterns
-from synapse.types import RoomAlias
+from synapse.http.site import SynapseRequest
+from synapse.rest.client._base import client_patterns
+from synapse.types import JsonDict, RoomAlias
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ClientDirectoryServer(hs).register(http_server)
ClientDirectoryListServer(hs).register(http_server)
ClientAppserviceDirectoryListServer(hs).register(http_server)
@@ -38,21 +45,23 @@ def register_servlets(hs, http_server):
class ClientDirectoryServer(RestServlet):
PATTERNS = client_patterns("/directory/room/(?P<room_alias>[^/]*)$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.store = hs.get_datastore()
self.directory_handler = hs.get_directory_handler()
self.auth = hs.get_auth()
- async def on_GET(self, request, room_alias):
- room_alias = RoomAlias.from_string(room_alias)
+ async def on_GET(self, request: Request, room_alias: str) -> Tuple[int, JsonDict]:
+ room_alias_obj = RoomAlias.from_string(room_alias)
- res = await self.directory_handler.get_association(room_alias)
+ res = await self.directory_handler.get_association(room_alias_obj)
return 200, res
- async def on_PUT(self, request, room_alias):
- room_alias = RoomAlias.from_string(room_alias)
+ async def on_PUT(
+ self, request: SynapseRequest, room_alias: str
+ ) -> Tuple[int, JsonDict]:
+ room_alias_obj = RoomAlias.from_string(room_alias)
content = parse_json_object_from_request(request)
if "room_id" not in content:
@@ -61,7 +70,7 @@ class ClientDirectoryServer(RestServlet):
)
logger.debug("Got content: %s", content)
- logger.debug("Got room name: %s", room_alias.to_string())
+ logger.debug("Got room name: %s", room_alias_obj.to_string())
room_id = content["room_id"]
servers = content["servers"] if "servers" in content else None
@@ -78,22 +87,25 @@ class ClientDirectoryServer(RestServlet):
requester = await self.auth.get_user_by_req(request)
await self.directory_handler.create_association(
- requester, room_alias, room_id, servers
+ requester, room_alias_obj, room_id, servers
)
return 200, {}
- async def on_DELETE(self, request, room_alias):
+ async def on_DELETE(
+ self, request: SynapseRequest, room_alias: str
+ ) -> Tuple[int, JsonDict]:
+ room_alias_obj = RoomAlias.from_string(room_alias)
+
try:
service = self.auth.get_appservice_by_req(request)
- room_alias = RoomAlias.from_string(room_alias)
await self.directory_handler.delete_appservice_association(
- service, room_alias
+ service, room_alias_obj
)
logger.info(
"Application service at %s deleted alias %s",
service.url,
- room_alias.to_string(),
+ room_alias_obj.to_string(),
)
return 200, {}
except InvalidClientCredentialsError:
@@ -103,12 +115,10 @@ class ClientDirectoryServer(RestServlet):
requester = await self.auth.get_user_by_req(request)
user = requester.user
- room_alias = RoomAlias.from_string(room_alias)
-
- await self.directory_handler.delete_association(requester, room_alias)
+ await self.directory_handler.delete_association(requester, room_alias_obj)
logger.info(
- "User %s deleted alias %s", user.to_string(), room_alias.to_string()
+ "User %s deleted alias %s", user.to_string(), room_alias_obj.to_string()
)
return 200, {}
@@ -117,20 +127,22 @@ class ClientDirectoryServer(RestServlet):
class ClientDirectoryListServer(RestServlet):
PATTERNS = client_patterns("/directory/list/room/(?P<room_id>[^/]*)$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.store = hs.get_datastore()
self.directory_handler = hs.get_directory_handler()
self.auth = hs.get_auth()
- async def on_GET(self, request, room_id):
+ async def on_GET(self, request: Request, room_id: str) -> Tuple[int, JsonDict]:
room = await self.store.get_room(room_id)
if room is None:
raise NotFoundError("Unknown room")
return 200, {"visibility": "public" if room["is_public"] else "private"}
- async def on_PUT(self, request, room_id):
+ async def on_PUT(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
content = parse_json_object_from_request(request)
@@ -142,7 +154,9 @@ class ClientDirectoryListServer(RestServlet):
return 200, {}
- async def on_DELETE(self, request, room_id):
+ async def on_DELETE(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
await self.directory_handler.edit_published_room_list(
@@ -157,21 +171,27 @@ class ClientAppserviceDirectoryListServer(RestServlet):
"/directory/list/appservice/(?P<network_id>[^/]*)/(?P<room_id>[^/]*)$", v1=True
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.store = hs.get_datastore()
self.directory_handler = hs.get_directory_handler()
self.auth = hs.get_auth()
- def on_PUT(self, request, network_id, room_id):
+ async def on_PUT(
+ self, request: SynapseRequest, network_id: str, room_id: str
+ ) -> Tuple[int, JsonDict]:
content = parse_json_object_from_request(request)
visibility = content.get("visibility", "public")
- return self._edit(request, network_id, room_id, visibility)
+ return await self._edit(request, network_id, room_id, visibility)
- def on_DELETE(self, request, network_id, room_id):
- return self._edit(request, network_id, room_id, "private")
+ async def on_DELETE(
+ self, request: SynapseRequest, network_id: str, room_id: str
+ ) -> Tuple[int, JsonDict]:
+ return await self._edit(request, network_id, room_id, "private")
- async def _edit(self, request, network_id, room_id, visibility):
+ async def _edit(
+ self, request: SynapseRequest, network_id: str, room_id: str, visibility: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if not requester.app_service:
raise AuthError(
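The rename from `room_alias` to `room_alias_obj` here (and from `filter_id` to `filter_id_int` later in this patch) exists because mypy rejects rebinding a `str` parameter to a value of a different type. A standalone illustration, using a stand-in `RoomAlias`:

```python
class RoomAlias:
    """Stand-in for synapse.types.RoomAlias (assumption)."""

    def __init__(self, alias: str) -> None:
        self.alias = alias

    @classmethod
    def from_string(cls, s: str) -> "RoomAlias":
        return cls(s)

    def to_string(self) -> str:
        return self.alias


def on_get(room_alias: str) -> str:
    # Parsing into a new name avoids rebinding the `str` parameter to a
    # RoomAlias, which mypy flags as an incompatible redefinition.
    room_alias_obj = RoomAlias.from_string(room_alias)
    return room_alias_obj.to_string()


print(on_get("#example:server.test"))  # -> #example:server.test
```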
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/events.py
index ee7454996e..13b72a045a 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/events.py
@@ -14,11 +14,18 @@
"""This module contains REST servlets to do with event streaming, /events."""
import logging
+from typing import TYPE_CHECKING, Dict, List, Tuple, Union
from synapse.api.errors import SynapseError
-from synapse.http.servlet import RestServlet
-from synapse.rest.client.v2_alpha._base import client_patterns
+from synapse.http.server import HttpServer
+from synapse.http.servlet import RestServlet, parse_string
+from synapse.http.site import SynapseRequest
+from synapse.rest.client._base import client_patterns
from synapse.streams.config import PaginationConfig
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -28,31 +35,30 @@ class EventStreamRestServlet(RestServlet):
DEFAULT_LONGPOLL_TIME_MS = 30000
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.event_stream_handler = hs.get_event_stream_handler()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
is_guest = requester.is_guest
- room_id = None
+ args: Dict[bytes, List[bytes]] = request.args # type: ignore
if is_guest:
- if b"room_id" not in request.args:
+ if b"room_id" not in args:
raise SynapseError(400, "Guest users must specify room_id param")
- if b"room_id" in request.args:
- room_id = request.args[b"room_id"][0].decode("ascii")
+ room_id = parse_string(request, "room_id")
pagin_config = await PaginationConfig.from_request(self.store, request)
timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS
- if b"timeout" in request.args:
+ if b"timeout" in args:
try:
- timeout = int(request.args[b"timeout"][0])
+ timeout = int(args[b"timeout"][0])
except ValueError:
raise SynapseError(400, "timeout must be in milliseconds.")
- as_client_event = b"raw" not in request.args
+ as_client_event = b"raw" not in args
chunk = await self.event_stream_handler.get_stream(
requester.user.to_string(),
@@ -70,25 +76,27 @@ class EventStreamRestServlet(RestServlet):
class EventRestServlet(RestServlet):
PATTERNS = client_patterns("/events/(?P<event_id>[^/]*)$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.clock = hs.get_clock()
self.event_handler = hs.get_event_handler()
self.auth = hs.get_auth()
self._event_serializer = hs.get_event_client_serializer()
- async def on_GET(self, request, event_id):
+ async def on_GET(
+ self, request: SynapseRequest, event_id: str
+ ) -> Tuple[int, Union[str, JsonDict]]:
requester = await self.auth.get_user_by_req(request)
event = await self.event_handler.get_event(requester.user, None, event_id)
time_now = self.clock.time_msec()
if event:
- event = await self._event_serializer.serialize_event(event, time_now)
- return 200, event
+ result = await self._event_serializer.serialize_event(event, time_now)
+ return 200, result
else:
return 404, "Event not found."
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
EventStreamRestServlet(hs).register(http_server)
EventRestServlet(hs).register(http_server)
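The `args: Dict[bytes, List[bytes]] = request.args  # type: ignore` idiom (also used in initial_sync.py below) pins Twisted's loosely typed `Request.args` to a single annotated local, so the rest of the handler type-checks without scattering casts. A sketch with a fake request object:

```python
from typing import Dict, List


class FakeRequest:
    """Stand-in for twisted.web.server.Request (assumption)."""

    def __init__(self) -> None:
        # Twisted fills this in per-request; it is Optional in the stubs,
        # which is why Synapse needs the alias plus one `type: ignore`.
        self.args = {b"timeout": [b"5000"], b"raw": [b""]}


request = FakeRequest()
args: Dict[bytes, List[bytes]] = request.args  # type: ignore

timeout = int(args[b"timeout"][0]) if b"timeout" in args else 30000
as_client_event = b"raw" not in args
print(timeout, as_client_event)  # -> 5000 False
```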
diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/filter.py
index 411667a9c8..6ed60c7418 100644
--- a/synapse/rest/client/v2_alpha/filter.py
+++ b/synapse/rest/client/filter.py
@@ -13,26 +13,34 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import AuthError, NotFoundError, StoreError, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.types import UserID
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict, UserID
from ._base import client_patterns, set_timeline_upper_limit
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
class GetFilterRestServlet(RestServlet):
PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/filter/(?P<filter_id>[^/]*)")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.filtering = hs.get_filtering()
- async def on_GET(self, request, user_id, filter_id):
+ async def on_GET(
+ self, request: SynapseRequest, user_id: str, filter_id: str
+ ) -> Tuple[int, JsonDict]:
target_user = UserID.from_string(user_id)
requester = await self.auth.get_user_by_req(request)
@@ -43,13 +51,13 @@ class GetFilterRestServlet(RestServlet):
raise AuthError(403, "Can only get filters for local users")
try:
- filter_id = int(filter_id)
+ filter_id_int = int(filter_id)
except Exception:
raise SynapseError(400, "Invalid filter_id")
try:
filter_collection = await self.filtering.get_user_filter(
- user_localpart=target_user.localpart, filter_id=filter_id
+ user_localpart=target_user.localpart, filter_id=filter_id_int
)
except StoreError as e:
if e.code != 404:
@@ -62,13 +70,15 @@ class GetFilterRestServlet(RestServlet):
class CreateFilterRestServlet(RestServlet):
PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/filter")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.filtering = hs.get_filtering()
- async def on_POST(self, request, user_id):
+ async def on_POST(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
target_user = UserID.from_string(user_id)
requester = await self.auth.get_user_by_req(request)
@@ -89,6 +99,6 @@ class CreateFilterRestServlet(RestServlet):
return 200, {"filter_id": str(filter_id)}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
GetFilterRestServlet(hs).register(http_server)
CreateFilterRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/groups.py
index 6285680c00..a7e9aa3e9b 100644
--- a/synapse/rest/client/v2_alpha/groups.py
+++ b/synapse/rest/client/groups.py
@@ -15,7 +15,7 @@
import logging
from functools import wraps
-from typing import TYPE_CHECKING, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple
from twisted.web.server import Request
@@ -26,6 +26,7 @@ from synapse.api.constants import (
)
from synapse.api.errors import Codes, SynapseError
from synapse.handlers.groups_local import GroupsLocalHandler
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
@@ -42,14 +43,18 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-def _validate_group_id(f):
+def _validate_group_id(
+ f: Callable[..., Awaitable[Tuple[int, JsonDict]]]
+) -> Callable[..., Awaitable[Tuple[int, JsonDict]]]:
"""Wrapper to validate the form of the group ID.
Can be applied to any on_FOO methods that accept a group ID as a URL parameter.
"""
@wraps(f)
- def wrapper(self, request: Request, group_id: str, *args, **kwargs):
+ def wrapper(
+ self: RestServlet, request: Request, group_id: str, *args: Any, **kwargs: Any
+ ) -> Awaitable[Tuple[int, JsonDict]]:
if not GroupID.is_valid(group_id):
raise SynapseError(400, "%s is not a legal group ID" % (group_id,))
@@ -155,7 +160,7 @@ class GroupSummaryRoomsCatServlet(RestServlet):
group_id: str,
category_id: Optional[str],
room_id: str,
- ):
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
requester_user_id = requester.user.to_string()
@@ -187,7 +192,7 @@ class GroupSummaryRoomsCatServlet(RestServlet):
@_validate_group_id
async def on_DELETE(
self, request: SynapseRequest, group_id: str, category_id: str, room_id: str
- ):
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
requester_user_id = requester.user.to_string()
@@ -450,7 +455,7 @@ class GroupSummaryUsersRoleServlet(RestServlet):
@_validate_group_id
async def on_DELETE(
self, request: SynapseRequest, group_id: str, role_id: str, user_id: str
- ):
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
requester_user_id = requester.user.to_string()
@@ -673,7 +678,7 @@ class GroupAdminRoomsConfigServlet(RestServlet):
@_validate_group_id
async def on_PUT(
self, request: SynapseRequest, group_id: str, room_id: str, config_key: str
- ):
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
requester_user_id = requester.user.to_string()
@@ -705,7 +710,7 @@ class GroupAdminUsersInviteServlet(RestServlet):
@_validate_group_id
async def on_PUT(
- self, request: SynapseRequest, group_id, user_id
+ self, request: SynapseRequest, group_id: str, user_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
requester_user_id = requester.user.to_string()
@@ -737,7 +742,7 @@ class GroupAdminUsersKickServlet(RestServlet):
@_validate_group_id
async def on_PUT(
- self, request: SynapseRequest, group_id, user_id
+ self, request: SynapseRequest, group_id: str, user_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
requester_user_id = requester.user.to_string()
@@ -930,7 +935,7 @@ class GroupsForUserServlet(RestServlet):
return 200, result
-def register_servlets(hs: "HomeServer", http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
GroupServlet(hs).register(http_server)
GroupSummaryServlet(hs).register(http_server)
GroupInvitedUsersServlet(hs).register(http_server)
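Typing `_validate_group_id` as taking and returning `Callable[..., Awaitable[Tuple[int, JsonDict]]]` keeps the wrapped `on_FOO` methods' return type visible to the checker. A runnable sketch of the same decorator shape, with a toy validity rule standing in for the stricter `GroupID.is_valid`:

```python
import asyncio
from functools import wraps
from typing import Any, Awaitable, Callable, Dict, Tuple

JsonDict = Dict[str, Any]
Handler = Callable[..., Awaitable[Tuple[int, JsonDict]]]


def validate_group_id(f: Handler) -> Handler:
    @wraps(f)
    def wrapper(
        self: Any, request: Any, group_id: str, *args: Any, **kwargs: Any
    ) -> Awaitable[Tuple[int, JsonDict]]:
        if not group_id.startswith("+"):  # toy validity rule (assumption)
            raise ValueError(f"{group_id} is not a legal group ID")
        return f(self, request, group_id, *args, **kwargs)

    return wrapper


class Demo:
    @validate_group_id
    async def on_GET(self, request: Any, group_id: str) -> Tuple[int, JsonDict]:
        return 200, {"group_id": group_id}


print(asyncio.run(Demo().on_GET(None, "+group:server.test")))
```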
diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/initial_sync.py
index bef1edc838..49b1037b28 100644
--- a/synapse/rest/client/v1/initial_sync.py
+++ b/synapse/rest/client/initial_sync.py
@@ -12,25 +12,33 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import TYPE_CHECKING, Dict, List, Tuple
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_boolean
-from synapse.rest.client.v2_alpha._base import client_patterns
+from synapse.http.site import SynapseRequest
+from synapse.rest.client._base import client_patterns
from synapse.streams.config import PaginationConfig
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
# TODO: Needs unit testing
class InitialSyncRestServlet(RestServlet):
PATTERNS = client_patterns("/initialSync$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.initial_sync_handler = hs.get_initial_sync_handler()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
- as_client_event = b"raw" not in request.args
+ args: Dict[bytes, List[bytes]] = request.args # type: ignore
+ as_client_event = b"raw" not in args
pagination_config = await PaginationConfig.from_request(self.store, request)
include_archived = parse_boolean(request, "archived", default=False)
content = await self.initial_sync_handler.snapshot_all_rooms(
@@ -43,5 +51,5 @@ class InitialSyncRestServlet(RestServlet):
return 200, content
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
InitialSyncRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/keys.py
index d0d9d30d40..7281b2ee29 100644
--- a/synapse/rest/client/v2_alpha/keys.py
+++ b/synapse/rest/client/keys.py
@@ -15,19 +15,25 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Any, Optional, Tuple
-from synapse.api.errors import SynapseError
+from synapse.api.errors import InvalidAPICallError, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
parse_integer,
parse_json_object_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import log_kv, set_tag, trace
-from synapse.types import StreamToken
+from synapse.types import JsonDict, StreamToken
from ._base import client_patterns, interactive_auth_handler
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -59,18 +65,16 @@ class KeyUploadServlet(RestServlet):
PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.e2e_keys_handler = hs.get_e2e_keys_handler()
self.device_handler = hs.get_device_handler()
@trace(opname="upload_keys")
- async def on_POST(self, request, device_id):
+ async def on_POST(
+ self, request: SynapseRequest, device_id: Optional[str]
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
@@ -148,21 +152,30 @@ class KeyQueryServlet(RestServlet):
PATTERNS = client_patterns("/keys/query$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer):
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.e2e_keys_handler = hs.get_e2e_keys_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
user_id = requester.user.to_string()
device_id = requester.device_id
timeout = parse_integer(request, "timeout", 10 * 1000)
body = parse_json_object_from_request(request)
+
+ device_keys = body.get("device_keys")
+ if not isinstance(device_keys, dict):
+ raise InvalidAPICallError("'device_keys' must be a JSON object")
+
+ def is_list_of_strings(values: Any) -> bool:
+ return isinstance(values, list) and all(isinstance(v, str) for v in values)
+
+ if any(not is_list_of_strings(keys) for keys in device_keys.values()):
+ raise InvalidAPICallError(
+ "'device_keys' values must be a list of strings",
+ )
+
result = await self.e2e_keys_handler.query_devices(
body, timeout, user_id, device_id
)
@@ -181,17 +194,13 @@ class KeyChangesServlet(RestServlet):
PATTERNS = client_patterns("/keys/changes$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer):
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
self.store = hs.get_datastore()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
from_token_string = parse_string(request, "from", required=True)
@@ -231,12 +240,12 @@ class OneTimeKeyServlet(RestServlet):
PATTERNS = client_patterns("/keys/claim$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.e2e_keys_handler = hs.get_e2e_keys_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await self.auth.get_user_by_req(request, allow_guest=True)
timeout = parse_integer(request, "timeout", 10 * 1000)
body = parse_json_object_from_request(request)
@@ -255,11 +264,7 @@ class SigningKeyUploadServlet(RestServlet):
PATTERNS = client_patterns("/keys/device_signing/upload$", releases=())
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
@@ -267,7 +272,7 @@ class SigningKeyUploadServlet(RestServlet):
self.auth_handler = hs.get_auth_handler()
@interactive_auth_handler
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
@@ -315,16 +320,12 @@ class SignaturesUploadServlet(RestServlet):
PATTERNS = client_patterns("/keys/signatures/upload$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.e2e_keys_handler = hs.get_e2e_keys_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
@@ -335,7 +336,7 @@ class SignaturesUploadServlet(RestServlet):
return 200, result
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
KeyUploadServlet(hs).register(http_server)
KeyQueryServlet(hs).register(http_server)
KeyChangesServlet(hs).register(http_server)
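The new guard in `KeyQueryServlet.on_POST` rejects malformed `/keys/query` bodies with a 400 `InvalidAPICallError` before `query_devices` runs: `device_keys` must be a JSON object mapping user IDs to lists of device ID strings (an empty list means "all devices"). The same checks, restated standalone with plain `ValueError`:

```python
from typing import Any


def validate_device_keys(body: Any) -> None:
    device_keys = body.get("device_keys") if isinstance(body, dict) else None
    if not isinstance(device_keys, dict):
        raise ValueError("'device_keys' must be a JSON object")

    def is_list_of_strings(values: Any) -> bool:
        return isinstance(values, list) and all(isinstance(v, str) for v in values)

    if any(not is_list_of_strings(keys) for keys in device_keys.values()):
        raise ValueError("'device_keys' values must be lists of strings")


validate_device_keys({"device_keys": {"@alice:server.test": []}})        # ok: all devices
validate_device_keys({"device_keys": {"@alice:server.test": ["DEV1"]}})  # ok
# validate_device_keys({"device_keys": {"@alice:server.test": "DEV1"}})  # raises
```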
diff --git a/synapse/rest/client/v2_alpha/knock.py b/synapse/rest/client/knock.py
index 7d1bc40658..0152a0c66a 100644
--- a/synapse/rest/client/v2_alpha/knock.py
+++ b/synapse/rest/client/knock.py
@@ -13,12 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple
from twisted.web.server import Request
from synapse.api.constants import Membership
from synapse.api.errors import SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
parse_json_object_from_request,
@@ -95,7 +96,9 @@ class KnockRoomAliasServlet(RestServlet):
return 200, {"room_id": room_id}
- def on_PUT(self, request: Request, room_identifier: str, txn_id: str):
+ def on_PUT(
+ self, request: Request, room_identifier: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
@@ -103,5 +106,5 @@ class KnockRoomAliasServlet(RestServlet):
)
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
KnockRoomAliasServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/login.py
index 11567bf32c..4be502a77b 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/login.py
@@ -1,4 +1,4 @@
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
import logging
import re
-from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional, Tuple
from typing_extensions import TypedDict
@@ -34,7 +34,7 @@ from synapse.http.servlet import (
parse_string,
)
from synapse.http.site import SynapseRequest
-from synapse.rest.client.v2_alpha._base import client_patterns
+from synapse.rest.client._base import client_patterns
from synapse.rest.well_known import WellKnownBuilder
from synapse.types import JsonDict, UserID
@@ -104,7 +104,13 @@ class LoginRestServlet(RestServlet):
burst_count=self.hs.config.rc_login_account.burst_count,
)
- def on_GET(self, request: SynapseRequest):
+ # Ensure the CAS/SAML/OIDC handlers are loaded on this worker instance.
+ # This registers their auth_provider_ids with the SsoHandler, which in
+ # turn initialises the login/registration prometheus counters for those
+ # auth_provider_ids.
+ _load_sso_handlers(hs)
+
+ def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
flows = []
if self.jwt_enabled:
flows.append({"type": LoginRestServlet.JWT_TYPE})
@@ -151,7 +157,7 @@ class LoginRestServlet(RestServlet):
return 200, {"flows": flows}
- async def on_POST(self, request: SynapseRequest):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, LoginResponse]:
login_submission = parse_json_object_from_request(request)
if self._msc2918_enabled:
@@ -211,7 +217,7 @@ class LoginRestServlet(RestServlet):
login_submission: JsonDict,
appservice: ApplicationService,
should_issue_refresh_token: bool = False,
- ):
+ ) -> LoginResponse:
identifier = login_submission.get("identifier")
logger.info("Got appservice login request with identifier: %r", identifier)
@@ -461,10 +467,7 @@ class RefreshTokenServlet(RestServlet):
self._clock = hs.get_clock()
self.access_token_lifetime = hs.config.access_token_lifetime
- async def on_POST(
- self,
- request: SynapseRequest,
- ):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
refresh_submission = parse_json_object_from_request(request)
assert_params_in_dict(refresh_submission, ["refresh_token"])
@@ -499,12 +502,7 @@ class SsoRedirectServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
# make sure that the relevant handlers are instantiated, so that they
# register themselves with the main SSOHandler.
- if hs.config.cas_enabled:
- hs.get_cas_handler()
- if hs.config.saml2_enabled:
- hs.get_saml_handler()
- if hs.config.oidc_enabled:
- hs.get_oidc_handler()
+ _load_sso_handlers(hs)
self._sso_handler = hs.get_sso_handler()
self._msc2858_enabled = hs.config.experimental.msc2858_enabled
self._public_baseurl = hs.config.public_baseurl
@@ -569,7 +567,7 @@ class SsoRedirectServlet(RestServlet):
class CasTicketServlet(RestServlet):
PATTERNS = client_patterns("/login/cas/ticket", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self._cas_handler = hs.get_cas_handler()
@@ -591,10 +589,26 @@ class CasTicketServlet(RestServlet):
)
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
LoginRestServlet(hs).register(http_server)
if hs.config.access_token_lifetime is not None:
RefreshTokenServlet(hs).register(http_server)
SsoRedirectServlet(hs).register(http_server)
if hs.config.cas_enabled:
CasTicketServlet(hs).register(http_server)
+
+
+def _load_sso_handlers(hs: "HomeServer") -> None:
+ """Ensure that the SSO handlers are loaded, if they are enabled by configuration.
+
+ This is mostly useful to ensure that the CAS/SAML/OIDC handlers register themselves
+ with the main SsoHandler.
+
+ It's safe to call this multiple times.
+ """
+ if hs.config.cas.cas_enabled:
+ hs.get_cas_handler()
+ if hs.config.saml2.saml2_enabled:
+ hs.get_saml_handler()
+ if hs.config.oidc.oidc_enabled:
+ hs.get_oidc_handler()
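`_load_sso_handlers` is safe to call repeatedly because the `hs.get_*_handler()` getters construct each handler once and then hand back the cached instance (Synapse's `@cache_in_self`-style getters). A toy demonstration of that property, with a fake homeserver:

```python
from typing import Optional


class FakeHomeServer:
    """Toy stand-in; assumes handler getters are cached, as Synapse's
    @cache_in_self getters are."""

    def __init__(self, cas_enabled: bool) -> None:
        self.cas_enabled = cas_enabled
        self._cas_handler: Optional[object] = None

    def get_cas_handler(self) -> object:
        if self._cas_handler is None:
            self._cas_handler = object()  # constructed once, then reused
        return self._cas_handler


def load_sso_handlers(hs: FakeHomeServer) -> None:
    if hs.cas_enabled:
        hs.get_cas_handler()


hs = FakeHomeServer(cas_enabled=True)
load_sso_handlers(hs)
first = hs._cas_handler
load_sso_handlers(hs)
assert hs._cas_handler is first  # calling twice re-uses the same handler
```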
diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/logout.py
index 5aa7908d73..193a6951b9 100644
--- a/synapse/rest/client/v1/logout.py
+++ b/synapse/rest/client/logout.py
@@ -13,9 +13,16 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
-from synapse.rest.client.v2_alpha._base import client_patterns
+from synapse.http.site import SynapseRequest
+from synapse.rest.client._base import client_patterns
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -23,13 +30,13 @@ logger = logging.getLogger(__name__)
class LogoutRestServlet(RestServlet):
PATTERNS = client_patterns("/logout$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_expired=True)
if requester.device_id is None:
@@ -48,13 +55,13 @@ class LogoutRestServlet(RestServlet):
class LogoutAllRestServlet(RestServlet):
PATTERNS = client_patterns("/logout/all$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_expired=True)
user_id = requester.user.to_string()
@@ -67,6 +74,6 @@ class LogoutAllRestServlet(RestServlet):
return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
LogoutRestServlet(hs).register(http_server)
LogoutAllRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/notifications.py
index 0ede643c2d..d1d8a984c6 100644
--- a/synapse/rest/client/v2_alpha/notifications.py
+++ b/synapse/rest/client/notifications.py
@@ -13,26 +13,33 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.events.utils import format_event_for_client_v2_without_room_id
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_integer, parse_string
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
class NotificationsServlet(RestServlet):
PATTERNS = client_patterns("/notifications$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.store = hs.get_datastore()
self.auth = hs.get_auth()
self.clock = hs.get_clock()
self._event_serializer = hs.get_event_client_serializer()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
@@ -87,5 +94,5 @@ class NotificationsServlet(RestServlet):
return 200, {"notifications": returned_push_actions, "next_token": next_token}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
NotificationsServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/openid.py
index e8d2673819..4dda6dce4b 100644
--- a/synapse/rest/client/v2_alpha/openid.py
+++ b/synapse/rest/client/openid.py
@@ -12,15 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import AuthError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from synapse.util.stringutils import random_string
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -58,14 +64,16 @@ class IdTokenServlet(RestServlet):
EXPIRES_MS = 3600 * 1000
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.clock = hs.get_clock()
self.server_name = hs.config.server_name
- async def on_POST(self, request, user_id):
+ async def on_POST(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot request tokens for other users.")
@@ -90,5 +98,5 @@ class IdTokenServlet(RestServlet):
)
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
IdTokenServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/password_policy.py b/synapse/rest/client/password_policy.py
index a83927aee6..6d64efb165 100644
--- a/synapse/rest/client/v2_alpha/password_policy.py
+++ b/synapse/rest/client/password_policy.py
@@ -13,28 +13,32 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
+from twisted.web.server import Request
+
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
class PasswordPolicyServlet(RestServlet):
PATTERNS = client_patterns("/password_policy$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.policy = hs.config.password_policy
self.enabled = hs.config.password_policy_enabled
- def on_GET(self, request):
+ def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
if not self.enabled or not self.policy:
return (200, {})
@@ -53,5 +57,5 @@ class PasswordPolicyServlet(RestServlet):
return (200, policy)
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
PasswordPolicyServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/presence.py
index 2b24fe5aa6..94dd4fe2f4 100644
--- a/synapse/rest/client/v1/presence.py
+++ b/synapse/rest/client/presence.py
@@ -15,12 +15,18 @@
""" This module contains REST servlets to do with presence: /presence/<paths>
"""
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import AuthError, SynapseError
from synapse.handlers.presence import format_user_presence_state
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.rest.client.v2_alpha._base import client_patterns
-from synapse.types import UserID
+from synapse.http.site import SynapseRequest
+from synapse.rest.client._base import client_patterns
+from synapse.types import JsonDict, UserID
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -28,7 +34,7 @@ logger = logging.getLogger(__name__)
class PresenceStatusRestServlet(RestServlet):
PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.presence_handler = hs.get_presence_handler()
@@ -37,7 +43,9 @@ class PresenceStatusRestServlet(RestServlet):
self._use_presence = hs.config.server.use_presence
- async def on_GET(self, request, user_id):
+ async def on_GET(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
user = UserID.from_string(user_id)
@@ -53,13 +61,15 @@ class PresenceStatusRestServlet(RestServlet):
raise AuthError(403, "You are not allowed to see their presence.")
state = await self.presence_handler.get_state(target_user=user)
- state = format_user_presence_state(
+ result = format_user_presence_state(
state, self.clock.time_msec(), include_user_id=False
)
- return 200, state
+ return 200, result
- async def on_PUT(self, request, user_id):
+ async def on_PUT(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
user = UserID.from_string(user_id)
@@ -91,5 +101,5 @@ class PresenceStatusRestServlet(RestServlet):
return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
PresenceStatusRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/profile.py
index f42f4b3567..d0f20de569 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/profile.py
@@ -14,22 +14,31 @@
""" This module contains REST servlets to do with profile: /profile/<paths> """
+from typing import TYPE_CHECKING, Tuple
+
from synapse.api.errors import Codes, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.rest.client.v2_alpha._base import client_patterns
-from synapse.types import UserID
+from synapse.http.site import SynapseRequest
+from synapse.rest.client._base import client_patterns
+from synapse.types import JsonDict, UserID
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
class ProfileDisplaynameRestServlet(RestServlet):
PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/displayname", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.profile_handler = hs.get_profile_handler()
self.auth = hs.get_auth()
- async def on_GET(self, request, user_id):
+ async def on_GET(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
requester_user = None
if self.hs.config.require_auth_for_profile_requests:
@@ -48,7 +57,9 @@ class ProfileDisplaynameRestServlet(RestServlet):
return 200, ret
- async def on_PUT(self, request, user_id):
+ async def on_PUT(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
user = UserID.from_string(user_id)
is_admin = await self.auth.is_server_admin(requester.user)
@@ -72,13 +83,15 @@ class ProfileDisplaynameRestServlet(RestServlet):
class ProfileAvatarURLRestServlet(RestServlet):
PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/avatar_url", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.profile_handler = hs.get_profile_handler()
self.auth = hs.get_auth()
- async def on_GET(self, request, user_id):
+ async def on_GET(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
requester_user = None
if self.hs.config.require_auth_for_profile_requests:
@@ -97,7 +110,9 @@ class ProfileAvatarURLRestServlet(RestServlet):
return 200, ret
- async def on_PUT(self, request, user_id):
+ async def on_PUT(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
user = UserID.from_string(user_id)
is_admin = await self.auth.is_server_admin(requester.user)
@@ -120,13 +135,15 @@ class ProfileAvatarURLRestServlet(RestServlet):
class ProfileRestServlet(RestServlet):
PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.profile_handler = hs.get_profile_handler()
self.auth = hs.get_auth()
- async def on_GET(self, request, user_id):
+ async def on_GET(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
requester_user = None
if self.hs.config.require_auth_for_profile_requests:
@@ -149,7 +166,7 @@ class ProfileRestServlet(RestServlet):
return 200, ret
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ProfileDisplaynameRestServlet(hs).register(http_server)
ProfileAvatarURLRestServlet(hs).register(http_server)
ProfileRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/push_rule.py
index be29a0b39e..fb3211bf3a 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/push_rule.py
@@ -12,22 +12,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple, Union
+
+import attr
+
from synapse.api.errors import (
NotFoundError,
StoreError,
SynapseError,
UnrecognizedRequestError,
)
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
parse_json_value_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
from synapse.push.baserules import BASE_RULE_IDS, NEW_RULE_IDS
from synapse.push.clientformat import format_push_rules_for_user
from synapse.push.rulekinds import PRIORITY_CLASS_MAP
-from synapse.rest.client.v2_alpha._base import client_patterns
+from synapse.rest.client._base import client_patterns
from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class RuleSpec:
+ scope: str
+ template: str
+ rule_id: str
+ attr: Optional[str]
class PushRuleRestServlet(RestServlet):
@@ -36,7 +54,7 @@ class PushRuleRestServlet(RestServlet):
"Unrecognised request: You probably wanted a trailing slash"
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
@@ -45,7 +63,7 @@ class PushRuleRestServlet(RestServlet):
self._users_new_default_push_rules = hs.config.users_new_default_push_rules
- async def on_PUT(self, request, path):
+ async def on_PUT(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]:
if self._is_worker:
raise Exception("Cannot handle PUT /push_rules on worker")
@@ -57,25 +75,25 @@ class PushRuleRestServlet(RestServlet):
requester = await self.auth.get_user_by_req(request)
- if "/" in spec["rule_id"] or "\\" in spec["rule_id"]:
+ if "/" in spec.rule_id or "\\" in spec.rule_id:
raise SynapseError(400, "rule_id may not contain slashes")
content = parse_json_value_from_request(request)
user_id = requester.user.to_string()
- if "attr" in spec:
+ if spec.attr:
await self.set_rule_attr(user_id, spec, content)
self.notify_user(user_id)
return 200, {}
- if spec["rule_id"].startswith("."):
+ if spec.rule_id.startswith("."):
# Rule ids starting with '.' are reserved for server default rules.
raise SynapseError(400, "cannot add new rule_ids that start with '.'")
try:
(conditions, actions) = _rule_tuple_from_request_object(
- spec["template"], spec["rule_id"], content
+ spec.template, spec.rule_id, content
)
except InvalidRuleException as e:
raise SynapseError(400, str(e))
@@ -106,7 +124,9 @@ class PushRuleRestServlet(RestServlet):
return 200, {}
- async def on_DELETE(self, request, path):
+ async def on_DELETE(
+ self, request: SynapseRequest, path: str
+ ) -> Tuple[int, JsonDict]:
if self._is_worker:
raise Exception("Cannot handle DELETE /push_rules on worker")
@@ -127,7 +147,7 @@ class PushRuleRestServlet(RestServlet):
else:
raise
- async def on_GET(self, request, path):
+ async def on_GET(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
@@ -138,40 +158,42 @@ class PushRuleRestServlet(RestServlet):
rules = format_push_rules_for_user(requester.user, rules)
- path = path.split("/")[1:]
+ path_parts = path.split("/")[1:]
- if path == []:
+ if path_parts == []:
# we're a reference impl: pedantry is our job.
raise UnrecognizedRequestError(
PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR
)
- if path[0] == "":
+ if path_parts[0] == "":
return 200, rules
- elif path[0] == "global":
- result = _filter_ruleset_with_path(rules["global"], path[1:])
+ elif path_parts[0] == "global":
+ result = _filter_ruleset_with_path(rules["global"], path_parts[1:])
return 200, result
else:
raise UnrecognizedRequestError()
- def notify_user(self, user_id):
+ def notify_user(self, user_id: str) -> None:
stream_id = self.store.get_max_push_rules_stream_id()
self.notifier.on_new_event("push_rules_key", stream_id, users=[user_id])
- async def set_rule_attr(self, user_id, spec, val):
- if spec["attr"] not in ("enabled", "actions"):
+ async def set_rule_attr(
+ self, user_id: str, spec: RuleSpec, val: Union[bool, JsonDict]
+ ) -> None:
+ if spec.attr not in ("enabled", "actions"):
# For the sake of potential future expansion, we shouldn't report a 404
# for an unknown request; check that it corresponds to a known attribute
# first.
raise UnrecognizedRequestError()
namespaced_rule_id = _namespaced_rule_id_from_spec(spec)
- rule_id = spec["rule_id"]
+ rule_id = spec.rule_id
is_default_rule = rule_id.startswith(".")
if is_default_rule:
if namespaced_rule_id not in BASE_RULE_IDS:
raise NotFoundError("Unknown rule %s" % (namespaced_rule_id,))
- if spec["attr"] == "enabled":
+ if spec.attr == "enabled":
if isinstance(val, dict) and "enabled" in val:
val = val["enabled"]
if not isinstance(val, bool):
@@ -179,14 +201,18 @@ class PushRuleRestServlet(RestServlet):
# This should *actually* take a dict, but many clients pass
# bools directly, so let's not break them.
raise SynapseError(400, "Value for 'enabled' must be boolean")
- return await self.store.set_push_rule_enabled(
+ await self.store.set_push_rule_enabled(
user_id, namespaced_rule_id, val, is_default_rule
)
- elif spec["attr"] == "actions":
+ elif spec.attr == "actions":
+ if not isinstance(val, dict):
+ raise SynapseError(400, "Value must be a dict")
actions = val.get("actions")
+ if not isinstance(actions, list):
+ raise SynapseError(400, "Value for 'actions' must be dict")
_check_actions(actions)
namespaced_rule_id = _namespaced_rule_id_from_spec(spec)
- rule_id = spec["rule_id"]
+ rule_id = spec.rule_id
is_default_rule = rule_id.startswith(".")
if is_default_rule:
if user_id in self._users_new_default_push_rules:
@@ -196,22 +222,21 @@ class PushRuleRestServlet(RestServlet):
if namespaced_rule_id not in rule_ids:
raise SynapseError(404, "Unknown rule %r" % (namespaced_rule_id,))
- return await self.store.set_push_rule_actions(
+ await self.store.set_push_rule_actions(
user_id, namespaced_rule_id, actions, is_default_rule
)
else:
raise UnrecognizedRequestError()
-def _rule_spec_from_path(path):
+def _rule_spec_from_path(path: Sequence[str]) -> RuleSpec:
"""Turn a sequence of path components into a rule spec
Args:
- path (sequence[unicode]): the URL path components.
+ path: the URL path components.
Returns:
- dict: rule spec dict, containing scope/template/rule_id entries,
- and possibly attr.
+ rule spec, containing scope/template/rule_id entries, and possibly attr.
Raises:
UnrecognizedRequestError if the path components cannot be parsed.
@@ -237,17 +262,18 @@ def _rule_spec_from_path(path):
rule_id = path[0]
- spec = {"scope": scope, "template": template, "rule_id": rule_id}
-
path = path[1:]
+ attr = None
if len(path) > 0 and len(path[0]) > 0:
- spec["attr"] = path[0]
+ attr = path[0]
- return spec
+ return RuleSpec(scope, template, rule_id, attr)
-def _rule_tuple_from_request_object(rule_template, rule_id, req_obj):
+def _rule_tuple_from_request_object(
+ rule_template: str, rule_id: str, req_obj: JsonDict
+) -> Tuple[List[JsonDict], List[Union[str, JsonDict]]]:
if rule_template in ["override", "underride"]:
if "conditions" not in req_obj:
raise InvalidRuleException("Missing 'conditions'")
@@ -277,7 +303,7 @@ def _rule_tuple_from_request_object(rule_template, rule_id, req_obj):
return conditions, actions
-def _check_actions(actions):
+def _check_actions(actions: List[Union[str, JsonDict]]) -> None:
if not isinstance(actions, list):
raise InvalidRuleException("No actions found")
@@ -290,7 +316,7 @@ def _check_actions(actions):
raise InvalidRuleException("Unrecognised action")
-def _filter_ruleset_with_path(ruleset, path):
+def _filter_ruleset_with_path(ruleset: JsonDict, path: List[str]) -> JsonDict:
if path == []:
raise UnrecognizedRequestError(
PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR
@@ -315,7 +341,7 @@ def _filter_ruleset_with_path(ruleset, path):
if r["rule_id"] == rule_id:
the_rule = r
if the_rule is None:
- raise NotFoundError
+ raise NotFoundError()
path = path[1:]
if len(path) == 0:
@@ -330,25 +356,25 @@ def _filter_ruleset_with_path(ruleset, path):
raise UnrecognizedRequestError()
-def _priority_class_from_spec(spec):
- if spec["template"] not in PRIORITY_CLASS_MAP.keys():
- raise InvalidRuleException("Unknown template: %s" % (spec["template"]))
- pc = PRIORITY_CLASS_MAP[spec["template"]]
+def _priority_class_from_spec(spec: RuleSpec) -> int:
+ if spec.template not in PRIORITY_CLASS_MAP.keys():
+ raise InvalidRuleException("Unknown template: %s" % (spec.template))
+ pc = PRIORITY_CLASS_MAP[spec.template]
return pc
-def _namespaced_rule_id_from_spec(spec):
- return _namespaced_rule_id(spec, spec["rule_id"])
+def _namespaced_rule_id_from_spec(spec: RuleSpec) -> str:
+ return _namespaced_rule_id(spec, spec.rule_id)
-def _namespaced_rule_id(spec, rule_id):
- return "global/%s/%s" % (spec["template"], rule_id)
+def _namespaced_rule_id(spec: RuleSpec, rule_id: str) -> str:
+ return "global/%s/%s" % (spec.template, rule_id)
class InvalidRuleException(Exception):
pass
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
PushRuleRestServlet(hs).register(http_server)
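Replacing the ad-hoc spec dict with a frozen `RuleSpec` attrs class gives mypy a fixed set of fields, so `spec.rule_id` replaces `spec["rule_id"]` throughout. A compact sketch of the same shape, using a dataclass as a stand-in for `attr.s` and a simplified path parser (the real `_rule_spec_from_path` also validates each component and raises `UnrecognizedRequestError` on bad input):

```python
from dataclasses import dataclass
from typing import Optional, Sequence


@dataclass(frozen=True)
class RuleSpec:
    """Stand-in mirroring the attrs class added above."""

    scope: str
    template: str
    rule_id: str
    attr: Optional[str]


def rule_spec_from_path(path: Sequence[str]) -> RuleSpec:
    # Simplified: assumes path is [scope, template, rule_id, attr?].
    scope, template, rule_id = path[0], path[1], path[2]
    attr = path[3] if len(path) > 3 and path[3] else None
    return RuleSpec(scope, template, rule_id, attr)


spec = rule_spec_from_path(["global", "override", ".m.rule.contains_user_name", "enabled"])
print(spec.rule_id, spec.attr)  # -> .m.rule.contains_user_name enabled
```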
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/pusher.py
index 18102eca6c..98604a9388 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/pusher.py
@@ -13,17 +13,23 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import Codes, StoreError, SynapseError
-from synapse.http.server import respond_with_html_bytes
+from synapse.http.server import HttpServer, respond_with_html_bytes
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
from synapse.push import PusherConfigException
-from synapse.rest.client.v2_alpha._base import client_patterns
+from synapse.rest.client._base import client_patterns
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -31,12 +37,12 @@ logger = logging.getLogger(__name__)
class PushersRestServlet(RestServlet):
PATTERNS = client_patterns("/pushers$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
user = requester.user
@@ -50,14 +56,14 @@ class PushersRestServlet(RestServlet):
class PushersSetRestServlet(RestServlet):
PATTERNS = client_patterns("/pushers/set$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.notifier = hs.get_notifier()
self.pusher_pool = self.hs.get_pusherpool()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
user = requester.user
@@ -132,14 +138,14 @@ class PushersRemoveRestServlet(RestServlet):
PATTERNS = client_patterns("/pushers/remove$", v1=True)
SUCCESS_HTML = b"<html><body>You have been unsubscribed</body><html>"
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.notifier = hs.get_notifier()
self.auth = hs.get_auth()
self.pusher_pool = self.hs.get_pusherpool()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> None:
requester = await self.auth.get_user_by_req(request, rights="delete_pusher")
user = requester.user
@@ -165,7 +171,7 @@ class PushersRemoveRestServlet(RestServlet):
return None
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
PushersRestServlet(hs).register(http_server)
PushersSetRestServlet(hs).register(http_server)
PushersRemoveRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/read_marker.py
index 027f8b81fa..43c04fac6f 100644
--- a/synapse/rest/client/v2_alpha/read_marker.py
+++ b/synapse/rest/client/read_marker.py
@@ -13,27 +13,36 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.constants import ReadReceiptEventFields
from synapse.api.errors import Codes, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
class ReadMarkerRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/read_markers$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.receipts_handler = hs.get_receipts_handler()
self.read_marker_handler = hs.get_read_marker_handler()
self.presence_handler = hs.get_presence_handler()
- async def on_POST(self, request, room_id):
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
await self.presence_handler.bump_presence_active_time(requester.user)
@@ -70,5 +79,5 @@ class ReadMarkerRestServlet(RestServlet):
return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReadMarkerRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/receipts.py
index 4b98979b47..9770413c61 100644
--- a/synapse/rest/client/v2_alpha/receipts.py
+++ b/synapse/rest/client/receipts.py
@@ -13,13 +13,20 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.constants import ReadReceiptEventFields
from synapse.api.errors import Codes, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -30,20 +37,22 @@ class ReceiptRestServlet(RestServlet):
"/(?P<event_id>[^/]*)$"
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.receipts_handler = hs.get_receipts_handler()
self.presence_handler = hs.get_presence_handler()
- async def on_POST(self, request, room_id, receipt_type, event_id):
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str, receipt_type: str, event_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if receipt_type != "m.read":
raise SynapseError(400, "Receipt type must be 'm.read'")
- body = parse_json_object_from_request(request)
+ body = parse_json_object_from_request(request, allow_empty_body=True)
hidden = body.get(ReadReceiptEventFields.MSC2285_HIDDEN, False)
if not isinstance(hidden, bool):
@@ -67,5 +76,5 @@ class ReceiptRestServlet(RestServlet):
return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReceiptRestServlet(hs).register(http_server)
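# ---------------------------------------------------------------------------
# Why `allow_empty_body=True` above: some clients POST a read receipt with no
# body at all, which parse_json_object_from_request would otherwise reject
# with a 400. A hedged client-side sketch (`requests`, the host, and the IDs
# are illustrative, not part of Synapse):
import requests

resp = requests.post(
    "https://example.org/_matrix/client/r0/rooms/!room:example.org"
    "/receipt/m.read/$event:example.org",
    headers={"Authorization": "Bearer <access_token>"},
    # No JSON body: with allow_empty_body=True the servlet treats this as {},
    # so the MSC2285 `hidden` lookup falls back to False.
)
assert resp.status_code == 200
# ---------------------------------------------------------------------------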
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/register.py
index 4d31584acd..8f3dd2a101 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/register.py
@@ -12,10 +12,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import hmac
import logging
import random
-from typing import List, Union
+from typing import TYPE_CHECKING, List, Optional, Tuple
+
+from twisted.web.server import Request
import synapse
import synapse.api.auth
@@ -28,16 +29,15 @@ from synapse.api.errors import (
ThreepidValidationError,
UnrecognizedRequestError,
)
+from synapse.api.ratelimiting import Ratelimiter
from synapse.config import ConfigError
-from synapse.config.captcha import CaptchaConfig
-from synapse.config.consent import ConsentConfig
from synapse.config.emailconfig import ThreepidBehaviour
+from synapse.config.homeserver import HomeServerConfig
from synapse.config.ratelimiting import FederationRateLimitConfig
-from synapse.config.registration import RegistrationConfig
from synapse.config.server import is_threepid_reserved
from synapse.handlers.auth import AuthHandler
from synapse.handlers.ui_auth import UIAuthSessionDataConstants
-from synapse.http.server import finish_request, respond_with_html
+from synapse.http.server import HttpServer, finish_request, respond_with_html
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
@@ -45,6 +45,7 @@ from synapse.http.servlet import (
parse_json_object_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
from synapse.types import JsonDict
@@ -59,17 +60,8 @@ from synapse.util.threepids import (
from ._base import client_patterns, interactive_auth_handler
-# We ought to be using hmac.compare_digest() but on older pythons it doesn't
-# exist. It's a _really minor_ security flaw to use plain string comparison
-# because the timing attack is so obscured by all the other code here it's
-# unlikely to make much difference
-if hasattr(hmac, "compare_digest"):
- compare_digest = hmac.compare_digest
-else:
-
- def compare_digest(a, b):
- return a == b
-
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -77,11 +69,7 @@ logger = logging.getLogger(__name__)
class EmailRegisterRequestTokenRestServlet(RestServlet):
PATTERNS = client_patterns("/register/email/requestToken$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
@@ -95,7 +83,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
template_text=self.config.email_registration_template_text,
)
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.hs.config.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
@@ -115,7 +103,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
# For emails, canonicalise the address.
# We store all email addresses canonicalised in the DB.
# (See on_POST in EmailThreepidRequestTokenRestServlet
- # in synapse/rest/client/v2_alpha/account.py)
+ # in synapse/rest/client/account.py)
try:
email = validate_email(body["email"])
except ValueError as e:
@@ -183,16 +171,12 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
class MsisdnRegisterRequestTokenRestServlet(RestServlet):
PATTERNS = client_patterns("/register/msisdn/requestToken$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
assert_params_in_dict(
@@ -267,11 +251,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
"/registration/(?P<medium>[^/]*)/submit_token$", releases=(), unstable=True
)
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
@@ -284,7 +264,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
self.config.email_registration_template_failure_html
)
- async def on_GET(self, request, medium):
+ async def on_GET(self, request: Request, medium: str) -> None:
if medium != "email":
raise SynapseError(
400, "This medium is currently not supported for registration"
@@ -338,11 +318,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
class UsernameAvailabilityRestServlet(RestServlet):
PATTERNS = client_patterns("/register/available")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.registration_handler = hs.get_registration_handler()
@@ -362,7 +338,7 @@ class UsernameAvailabilityRestServlet(RestServlet):
),
)
- async def on_GET(self, request):
+ async def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
if not self.hs.config.enable_registration:
raise SynapseError(
403, "Registration has been disabled", errcode=Codes.FORBIDDEN
@@ -379,14 +355,55 @@ class UsernameAvailabilityRestServlet(RestServlet):
return 200, {"available": True}
+class RegistrationTokenValidityRestServlet(RestServlet):
+ """Check the validity of a registration token.
+
+ Example:
+
+ GET /_matrix/client/unstable/org.matrix.msc3231/register/org.matrix.msc3231.login.registration_token/validity?token=abcd
+
+ 200 OK
+
+ {
+ "valid": true
+ }
+ """
+
+ PATTERNS = client_patterns(
+ f"/org.matrix.msc3231/register/{LoginType.REGISTRATION_TOKEN}/validity",
+ releases=(),
+ unstable=True,
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.hs = hs
+ self.store = hs.get_datastore()
+ self.ratelimiter = Ratelimiter(
+ store=self.store,
+ clock=hs.get_clock(),
+ rate_hz=hs.config.ratelimiting.rc_registration_token_validity.per_second,
+ burst_count=hs.config.ratelimiting.rc_registration_token_validity.burst_count,
+ )
+
+ async def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
+ await self.ratelimiter.ratelimit(None, (request.getClientIP(),))
+
+ if not self.hs.config.enable_registration:
+ raise SynapseError(
+ 403, "Registration has been disabled", errcode=Codes.FORBIDDEN
+ )
+
+ token = parse_string(request, "token", required=True)
+ valid = await self.store.registration_token_is_valid(token)
+
+ return 200, {"valid": valid}
+
+
class RegisterRestServlet(RestServlet):
PATTERNS = client_patterns("/register$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
@@ -408,23 +425,21 @@ class RegisterRestServlet(RestServlet):
)
@interactive_auth_handler
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
client_addr = request.getClientIP()
await self.ratelimiter.ratelimit(None, client_addr, update=False)
- kind = b"user"
- if b"kind" in request.args:
- kind = request.args[b"kind"][0]
+ kind = parse_string(request, "kind", default="user")
- if kind == b"guest":
+ if kind == "guest":
ret = await self._do_guest_registration(body, address=client_addr)
return ret
- elif kind != b"user":
+ elif kind != "user":
raise UnrecognizedRequestError(
- "Do not understand membership kind: %s" % (kind.decode("utf8"),)
+ f"Do not understand membership kind: {kind}",
)
if self._msc2918_enabled:
@@ -631,7 +646,7 @@ class RegisterRestServlet(RestServlet):
# For emails, canonicalise the address.
# We store all email addresses canonicalised in the DB.
# (See on_POST in EmailThreepidRequestTokenRestServlet
- # in synapse/rest/client/v2_alpha/account.py)
+ # in synapse/rest/client/account.py)
if medium == "email":
try:
address = canonicalise_email(address)
@@ -686,6 +701,22 @@ class RegisterRestServlet(RestServlet):
)
if registered:
+ # Check if a token was used to authenticate registration
+ registration_token = await self.auth_handler.get_session_data(
+ session_id,
+ UIAuthSessionDataConstants.REGISTRATION_TOKEN,
+ )
+ if registration_token:
+ # Increment the `completed` counter for the token
+ await self.store.use_registration_token(registration_token)
+ # Indicate that the token has been successfully used so that
+ # pending is not decremented again when expiring old UIA sessions.
+ await self.store.mark_ui_auth_stage_complete(
+ session_id,
+ LoginType.REGISTRATION_TOKEN,
+ True,
+ )
+
await self.registration_handler.post_registration_actions(
user_id=registered_user_id,
auth_result=auth_result,
@@ -695,8 +726,12 @@ class RegisterRestServlet(RestServlet):
return 200, return_dict
async def _do_appservice_registration(
- self, username, as_token, body, should_issue_refresh_token: bool = False
- ):
+ self,
+ username: str,
+ as_token: str,
+ body: JsonDict,
+ should_issue_refresh_token: bool = False,
+ ) -> JsonDict:
user_id = await self.registration_handler.appservice_register(
username, as_token
)
@@ -713,7 +748,7 @@ class RegisterRestServlet(RestServlet):
params: JsonDict,
is_appservice_ghost: bool = False,
should_issue_refresh_token: bool = False,
- ):
+ ) -> JsonDict:
"""Complete registration of newly-registered user
Allocates device_id if one was not given; also creates access_token.
@@ -757,7 +792,9 @@ class RegisterRestServlet(RestServlet):
return result
- async def _do_guest_registration(self, params, address=None):
+ async def _do_guest_registration(
+ self, params: JsonDict, address: Optional[str] = None
+ ) -> Tuple[int, JsonDict]:
if not self.hs.config.allow_guest_access:
raise SynapseError(403, "Guest access is disabled")
user_id = await self.registration_handler.register_user(
@@ -795,9 +832,7 @@ class RegisterRestServlet(RestServlet):
def _calculate_registration_flows(
- # technically `config` has to provide *all* of these interfaces, not just one
- config: Union[RegistrationConfig, ConsentConfig, CaptchaConfig],
- auth_handler: AuthHandler,
+ config: HomeServerConfig, auth_handler: AuthHandler
) -> List[List[str]]:
"""Get a suitable flows list for registration
@@ -868,12 +903,18 @@ def _calculate_registration_flows(
for flow in flows:
flow.insert(0, LoginType.RECAPTCHA)
+ # Prepend registration token to all flows if we're requiring a token
+ if config.registration_requires_token:
+ for flow in flows:
+ flow.insert(0, LoginType.REGISTRATION_TOKEN)
+
return flows
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
EmailRegisterRequestTokenRestServlet(hs).register(http_server)
MsisdnRegisterRequestTokenRestServlet(hs).register(http_server)
UsernameAvailabilityRestServlet(hs).register(http_server)
RegistrationSubmitTokenServlet(hs).register(http_server)
+ RegistrationTokenValidityRestServlet(hs).register(http_server)
RegisterRestServlet(hs).register(http_server)
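# ---------------------------------------------------------------------------
# How the MSC3231 pieces above fit together, as a hedged sketch: a client can
# pre-validate a token against RegistrationTokenValidityRestServlet, and once
# registration_requires_token is set, _calculate_registration_flows prepends
# the token stage to every flow. Host, token, and flow contents here are
# illustrative.
import requests

base = "https://example.org/_matrix/client/unstable/org.matrix.msc3231"
resp = requests.get(
    f"{base}/register/org.matrix.msc3231.login.registration_token/validity",
    params={"token": "abcd"},
)
print(resp.json())  # {"valid": true} while the token is usable

# A subsequent POST /register would then advertise flows shaped like:
#   {"flows": [{"stages": [
#       "org.matrix.msc3231.login.registration_token",
#       "m.login.dummy",
#   ]}]}
# and on success RegisterRestServlet bumps the token's `completed` counter.
# ---------------------------------------------------------------------------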
diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/relations.py
index 0821cd285f..0b0711c03c 100644
--- a/synapse/rest/client/v2_alpha/relations.py
+++ b/synapse/rest/client/relations.py
@@ -19,25 +19,32 @@ any time to reflect changes in the MSC.
"""
import logging
+from typing import TYPE_CHECKING, Awaitable, Optional, Tuple
from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.errors import ShadowBanError, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
parse_integer,
parse_json_object_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.storage.relations import (
AggregationPaginationToken,
PaginationChunk,
RelationPaginationToken,
)
+from synapse.types import JsonDict
from synapse.util.stringutils import random_string
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -59,13 +66,13 @@ class RelationSendServlet(RestServlet):
"/(?P<parent_id>[^/]*)/(?P<relation_type>[^/]*)/(?P<event_type>[^/]*)"
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.event_creation_handler = hs.get_event_creation_handler()
self.txns = HttpTransactionCache(hs)
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
http_server.register_paths(
"POST",
client_patterns(self.PATTERN + "$", releases=()),
@@ -79,14 +86,35 @@ class RelationSendServlet(RestServlet):
self.__class__.__name__,
)
- def on_PUT(self, request, *args, **kwargs):
+ def on_PUT(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ parent_id: str,
+ relation_type: str,
+ event_type: str,
+ txn_id: Optional[str] = None,
+ ) -> Awaitable[Tuple[int, JsonDict]]:
return self.txns.fetch_or_execute_request(
- request, self.on_PUT_or_POST, request, *args, **kwargs
+ request,
+ self.on_PUT_or_POST,
+ request,
+ room_id,
+ parent_id,
+ relation_type,
+ event_type,
+ txn_id,
)
async def on_PUT_or_POST(
- self, request, room_id, parent_id, relation_type, event_type, txn_id=None
- ):
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ parent_id: str,
+ relation_type: str,
+ event_type: str,
+ txn_id: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
if event_type == EventTypes.Member:
@@ -136,7 +164,7 @@ class RelationPaginationServlet(RestServlet):
releases=(),
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
@@ -145,8 +173,13 @@ class RelationPaginationServlet(RestServlet):
self.event_handler = hs.get_event_handler()
async def on_GET(
- self, request, room_id, parent_id, relation_type=None, event_type=None
- ):
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ parent_id: str,
+ relation_type: Optional[str] = None,
+ event_type: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
await self.auth.check_user_in_room_or_world_readable(
@@ -156,6 +189,8 @@ class RelationPaginationServlet(RestServlet):
# This gets the original event and checks that a) the event exists and
# b) the user is allowed to view it.
event = await self.event_handler.get_event(requester.user, room_id, parent_id)
+ if event is None:
+ raise SynapseError(404, "Unknown parent event.")
limit = parse_integer(request, "limit", default=5)
from_token_str = parse_string(request, "from")
@@ -233,15 +268,20 @@ class RelationAggregationPaginationServlet(RestServlet):
releases=(),
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.event_handler = hs.get_event_handler()
async def on_GET(
- self, request, room_id, parent_id, relation_type=None, event_type=None
- ):
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ parent_id: str,
+ relation_type: Optional[str] = None,
+ event_type: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
await self.auth.check_user_in_room_or_world_readable(
@@ -253,6 +293,8 @@ class RelationAggregationPaginationServlet(RestServlet):
# This checks that a) the event exists and b) the user is allowed to
# view it.
event = await self.event_handler.get_event(requester.user, room_id, parent_id)
+ if event is None:
+ raise SynapseError(404, "Unknown parent event.")
if relation_type not in (RelationTypes.ANNOTATION, None):
raise SynapseError(400, "Relation type must be 'annotation'")
@@ -315,7 +357,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
releases=(),
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
@@ -323,7 +365,15 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
self._event_serializer = hs.get_event_client_serializer()
self.event_handler = hs.get_event_handler()
- async def on_GET(self, request, room_id, parent_id, relation_type, event_type, key):
+ async def on_GET(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ parent_id: str,
+ relation_type: str,
+ event_type: str,
+ key: str,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
await self.auth.check_user_in_room_or_world_readable(
@@ -374,7 +424,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
return 200, return_value
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RelationSendServlet(hs).register(http_server)
RelationPaginationServlet(hs).register(http_server)
RelationAggregationPaginationServlet(hs).register(http_server)
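# ---------------------------------------------------------------------------
# Why on_PUT above now spells out every argument instead of *args/**kwargs:
# HttpTransactionCache replays the handler with the captured arguments when a
# client retries the same txn_id, so the signature is part of the idempotency
# contract. A hedged, self-contained sketch of that pattern (EchoServlet is
# hypothetical):
from typing import TYPE_CHECKING, Awaitable, Tuple

from synapse.http.servlet import RestServlet
from synapse.http.site import SynapseRequest
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.types import JsonDict

if TYPE_CHECKING:
    from synapse.server import HomeServer


class EchoServlet(RestServlet):
    def __init__(self, hs: "HomeServer"):
        super().__init__()
        self.txns = HttpTransactionCache(hs)

    def on_PUT(
        self, request: SynapseRequest, room_id: str, txn_id: str
    ) -> Awaitable[Tuple[int, JsonDict]]:
        # A retried PUT with the same txn_id returns the cached response
        # rather than running _handle a second time.
        return self.txns.fetch_or_execute_request(
            request, self._handle, request, room_id, txn_id
        )

    async def _handle(
        self, request: SynapseRequest, room_id: str, txn_id: str
    ) -> Tuple[int, JsonDict]:
        return 200, {"room_id": room_id, "txn_id": txn_id}
# ---------------------------------------------------------------------------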
diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/report_event.py
index 07ea39a8a3..d4a4adb50c 100644
--- a/synapse/rest/client/v2_alpha/report_event.py
+++ b/synapse/rest/client/report_event.py
@@ -14,26 +14,35 @@
import logging
from http import HTTPStatus
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import Codes, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
class ReportEventRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/report/(?P<event_id>[^/]*)$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.clock = hs.get_clock()
self.store = hs.get_datastore()
- async def on_POST(self, request, room_id, event_id):
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str, event_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
@@ -64,5 +73,5 @@ class ReportEventRestServlet(RestServlet):
return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReportEventRestServlet(hs).register(http_server)
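# ---------------------------------------------------------------------------
# Usage sketch for the endpoint typed above. Per the client-server spec the
# body carries an optional score (-100 most offensive .. 0 inoffensive) and a
# reason; host, IDs, and values here are illustrative:
import requests

requests.post(
    "https://example.org/_matrix/client/r0/rooms/!room:example.org"
    "/report/$event:example.org",
    headers={"Authorization": "Bearer <access_token>"},
    json={"score": -100, "reason": "spam"},
)
# ---------------------------------------------------------------------------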
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/room.py
index 502a917588..9b0c546505 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/room.py
@@ -16,22 +16,25 @@
""" This module contains REST servlets to do with rooms: /rooms/<paths> """
import logging
import re
-from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple
from urllib import parse as urlparse
-from synapse.api.constants import EventContentFields, EventTypes, Membership
+from twisted.web.server import Request
+
+from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
AuthError,
Codes,
- HttpResponseException,
InvalidClientCredentialsError,
+ MissingClientTokenError,
ShadowBanError,
SynapseError,
)
from synapse.api.filtering import Filter
-from synapse.appservice import ApplicationService
from synapse.events.utils import format_event_for_client_v2
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
+ ResolveRoomIdMixin,
RestServlet,
assert_params_in_dict,
parse_boolean,
@@ -42,20 +45,11 @@ from synapse.http.servlet import (
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import set_tag
+from synapse.rest.client._base import client_patterns
from synapse.rest.client.transactions import HttpTransactionCache
-from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
-from synapse.types import (
- JsonDict,
- Requester,
- RoomAlias,
- RoomID,
- StreamToken,
- ThirdPartyInstanceID,
- UserID,
- create_requester,
-)
+from synapse.types import JsonDict, StreamToken, ThirdPartyInstanceID, UserID
from synapse.util import json_decoder
from synapse.util.stringutils import parse_and_validate_server_name, random_string
@@ -66,7 +60,7 @@ logger = logging.getLogger(__name__)
class TransactionRestServlet(RestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.txns = HttpTransactionCache(hs)
@@ -74,20 +68,22 @@ class TransactionRestServlet(RestServlet):
class RoomCreateRestServlet(TransactionRestServlet):
# No PATTERN; we have custom dispatch rules here
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self._room_creation_handler = hs.get_room_creation_handler()
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
PATTERNS = "/createRoom"
register_txn_path(self, PATTERNS, http_server)
- def on_PUT(self, request, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(request, self.on_POST, request)
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
info, _ = await self._room_creation_handler.create_room(
@@ -96,21 +92,21 @@ class RoomCreateRestServlet(TransactionRestServlet):
return 200, info
- def get_room_config(self, request):
+ def get_room_config(self, request: Request) -> JsonDict:
user_supplied_config = parse_json_object_from_request(request)
return user_supplied_config
# TODO: Needs unit testing for generic events
class RoomStateEventRestServlet(TransactionRestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self.message_handler = hs.get_message_handler()
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
# /room/$roomid/state/$eventtype
no_state_key = "/rooms/(?P<room_id>[^/]*)/state/(?P<event_type>[^/]*)$"
@@ -145,13 +141,19 @@ class RoomStateEventRestServlet(TransactionRestServlet):
self.__class__.__name__,
)
- def on_GET_no_state_key(self, request, room_id, event_type):
+ def on_GET_no_state_key(
+ self, request: SynapseRequest, room_id: str, event_type: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
return self.on_GET(request, room_id, event_type, "")
- def on_PUT_no_state_key(self, request, room_id, event_type):
+ def on_PUT_no_state_key(
+ self, request: SynapseRequest, room_id: str, event_type: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
return self.on_PUT(request, room_id, event_type, "")
- async def on_GET(self, request, room_id, event_type, state_key):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str, event_type: str, state_key: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
format = parse_string(
request, "format", default="content", allowed_values=["content", "event"]
@@ -174,7 +176,17 @@ class RoomStateEventRestServlet(TransactionRestServlet):
elif format == "content":
return 200, data.get_dict()["content"]
- async def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
+ # Format must be event or content, per the parse_string call above.
+    raise RuntimeError(f"Unknown format: {format!r}.")
+
+ async def on_PUT(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ event_type: str,
+ state_key: str,
+ txn_id: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if txn_id:
@@ -220,27 +232,35 @@ class RoomStateEventRestServlet(TransactionRestServlet):
# TODO: Needs unit testing for generic events + feedback
class RoomSendEventRestServlet(TransactionRestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.event_creation_handler = hs.get_event_creation_handler()
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
# /rooms/$roomid/send/$event_type[/$txn_id]
PATTERNS = "/rooms/(?P<room_id>[^/]*)/send/(?P<event_type>[^/]*)"
register_txn_path(self, PATTERNS, http_server, with_get=True)
- async def on_POST(self, request, room_id, event_type, txn_id=None):
+ async def on_POST(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ event_type: str,
+ txn_id: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
content = parse_json_object_from_request(request)
- event_dict = {
+ event_dict: JsonDict = {
"type": event_type,
"content": content,
"room_id": room_id,
"sender": requester.user.to_string(),
}
+ # Twisted will have processed the args by now.
+ assert request.args is not None
if b"ts" in request.args and requester.app_service:
event_dict["origin_server_ts"] = parse_integer(request, "ts", 0)
@@ -258,10 +278,14 @@ class RoomSendEventRestServlet(TransactionRestServlet):
set_tag("event_id", event_id)
return 200, {"event_id": event_id}
- def on_GET(self, request, room_id, event_type, txn_id):
+ def on_GET(
+ self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str
+ ) -> Tuple[int, str]:
return 200, "Not implemented"
- def on_PUT(self, request, room_id, event_type, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
@@ -269,408 +293,14 @@ class RoomSendEventRestServlet(TransactionRestServlet):
)
-class RoomBatchSendEventRestServlet(TransactionRestServlet):
- """
- API endpoint which can insert a chunk of events historically back in time
- next to the given `prev_event`.
-
- `chunk_id` comes from `next_chunk_id `in the response of the batch send
- endpoint and is derived from the "insertion" events added to each chunk.
- It's not required for the first batch send.
-
- `state_events_at_start` is used to define the historical state events
- needed to auth the events like join events. These events will float
- outside of the normal DAG as outlier's and won't be visible in the chat
- history which also allows us to insert multiple chunks without having a bunch
- of `@mxid joined the room` noise between each chunk.
-
- `events` is chronological chunk/list of events you want to insert.
- There is a reverse-chronological constraint on chunks so once you insert
- some messages, you can only insert older ones after that.
- tldr; Insert chunks from your most recent history -> oldest history.
-
- POST /_matrix/client/unstable/org.matrix.msc2716/rooms/<roomID>/batch_send?prev_event=<eventID>&chunk_id=<chunkID>
- {
- "events": [ ... ],
- "state_events_at_start": [ ... ]
- }
- """
-
- PATTERNS = (
- re.compile(
- "^/_matrix/client/unstable/org.matrix.msc2716"
- "/rooms/(?P<room_id>[^/]*)/batch_send$"
- ),
- )
-
- def __init__(self, hs):
- super().__init__(hs)
- self.hs = hs
- self.store = hs.get_datastore()
- self.state_store = hs.get_storage().state
- self.event_creation_handler = hs.get_event_creation_handler()
- self.room_member_handler = hs.get_room_member_handler()
- self.auth = hs.get_auth()
-
- async def _inherit_depth_from_prev_ids(self, prev_event_ids) -> int:
- (
- most_recent_prev_event_id,
- most_recent_prev_event_depth,
- ) = await self.store.get_max_depth_of(prev_event_ids)
-
- # We want to insert the historical event after the `prev_event` but before the successor event
- #
- # We inherit depth from the successor event instead of the `prev_event`
- # because events returned from `/messages` are first sorted by `topological_ordering`
- # which is just the `depth` and then tie-break with `stream_ordering`.
- #
- # We mark these inserted historical events as "backfilled" which gives them a
- # negative `stream_ordering`. If we use the same depth as the `prev_event`,
- # then our historical event will tie-break and be sorted before the `prev_event`
- # when it should come after.
- #
- # We want to use the successor event depth so they appear after `prev_event` because
- # it has a larger `depth` but before the successor event because the `stream_ordering`
- # is negative before the successor event.
- successor_event_ids = await self.store.get_successor_events(
- [most_recent_prev_event_id]
- )
-
- # If we can't find any successor events, then it's a forward extremity of
- # historical messages and we can just inherit from the previous historical
- # event which we can already assume has the correct depth where we want
- # to insert into.
- if not successor_event_ids:
- depth = most_recent_prev_event_depth
- else:
- (
- _,
- oldest_successor_depth,
- ) = await self.store.get_min_depth_of(successor_event_ids)
-
- depth = oldest_successor_depth
-
- return depth
-
- def _create_insertion_event_dict(
- self, sender: str, room_id: str, origin_server_ts: int
- ):
- """Creates an event dict for an "insertion" event with the proper fields
- and a random chunk ID.
-
- Args:
- sender: The event author MXID
- room_id: The room ID that the event belongs to
- origin_server_ts: Timestamp when the event was sent
-
- Returns:
- Tuple of event ID and stream ordering position
- """
-
- next_chunk_id = random_string(8)
- insertion_event = {
- "type": EventTypes.MSC2716_INSERTION,
- "sender": sender,
- "room_id": room_id,
- "content": {
- EventContentFields.MSC2716_NEXT_CHUNK_ID: next_chunk_id,
- EventContentFields.MSC2716_HISTORICAL: True,
- },
- "origin_server_ts": origin_server_ts,
- }
-
- return insertion_event
-
- async def _create_requester_for_user_id_from_app_service(
- self, user_id: str, app_service: ApplicationService
- ) -> Requester:
- """Creates a new requester for the given user_id
- and validates that the app service is allowed to control
- the given user.
-
- Args:
- user_id: The author MXID that the app service is controlling
- app_service: The app service that controls the user
-
- Returns:
- Requester object
- """
-
- await self.auth.validate_appservice_can_control_user_id(app_service, user_id)
-
- return create_requester(user_id, app_service=app_service)
-
- async def on_POST(self, request, room_id):
- requester = await self.auth.get_user_by_req(request, allow_guest=False)
-
- if not requester.app_service:
- raise AuthError(
- 403,
- "Only application services can use the /batchsend endpoint",
- )
-
- body = parse_json_object_from_request(request)
- assert_params_in_dict(body, ["state_events_at_start", "events"])
-
- prev_events_from_query = parse_strings_from_args(request.args, "prev_event")
- chunk_id_from_query = parse_string(request, "chunk_id")
-
- if prev_events_from_query is None:
- raise SynapseError(
- 400,
- "prev_event query parameter is required when inserting historical messages back in time",
- errcode=Codes.MISSING_PARAM,
- )
-
- # For the event we are inserting next to (`prev_events_from_query`),
- # find the most recent auth events (derived from state events) that
- # allowed that message to be sent. We will use that as a base
- # to auth our historical messages against.
- (
- most_recent_prev_event_id,
- _,
- ) = await self.store.get_max_depth_of(prev_events_from_query)
- # mapping from (type, state_key) -> state_event_id
- prev_state_map = await self.state_store.get_state_ids_for_event(
- most_recent_prev_event_id
- )
- # List of state event ID's
- prev_state_ids = list(prev_state_map.values())
- auth_event_ids = prev_state_ids
-
- for state_event in body["state_events_at_start"]:
- assert_params_in_dict(
- state_event, ["type", "origin_server_ts", "content", "sender"]
- )
-
- logger.debug(
- "RoomBatchSendEventRestServlet inserting state_event=%s, auth_event_ids=%s",
- state_event,
- auth_event_ids,
- )
-
- event_dict = {
- "type": state_event["type"],
- "origin_server_ts": state_event["origin_server_ts"],
- "content": state_event["content"],
- "room_id": room_id,
- "sender": state_event["sender"],
- "state_key": state_event["state_key"],
- }
-
- # Make the state events float off on their own
- fake_prev_event_id = "$" + random_string(43)
-
- # TODO: This is pretty much the same as some other code to handle inserting state in this file
- if event_dict["type"] == EventTypes.Member:
- membership = event_dict["content"].get("membership", None)
- event_id, _ = await self.room_member_handler.update_membership(
- await self._create_requester_for_user_id_from_app_service(
- state_event["sender"], requester.app_service
- ),
- target=UserID.from_string(event_dict["state_key"]),
- room_id=room_id,
- action=membership,
- content=event_dict["content"],
- outlier=True,
- prev_event_ids=[fake_prev_event_id],
- # Make sure to use a copy of this list because we modify it
- # later in the loop here. Otherwise it will be the same
- # reference and also update in the event when we append later.
- auth_event_ids=auth_event_ids.copy(),
- )
- else:
- # TODO: Add some complement tests that adds state that is not member joins
- # and will use this code path. Maybe we only want to support join state events
- # and can get rid of this `else`?
- (
- event,
- _,
- ) = await self.event_creation_handler.create_and_send_nonmember_event(
- await self._create_requester_for_user_id_from_app_service(
- state_event["sender"], requester.app_service
- ),
- event_dict,
- outlier=True,
- prev_event_ids=[fake_prev_event_id],
- # Make sure to use a copy of this list because we modify it
- # later in the loop here. Otherwise it will be the same
- # reference and also update in the event when we append later.
- auth_event_ids=auth_event_ids.copy(),
- )
- event_id = event.event_id
-
- auth_event_ids.append(event_id)
-
- events_to_create = body["events"]
-
- inherited_depth = await self._inherit_depth_from_prev_ids(
- prev_events_from_query
- )
-
- # Figure out which chunk to connect to. If they passed in
- # chunk_id_from_query let's use it. The chunk ID passed in comes
- # from the chunk_id in the "insertion" event from the previous chunk.
- last_event_in_chunk = events_to_create[-1]
- chunk_id_to_connect_to = chunk_id_from_query
- base_insertion_event = None
- if chunk_id_from_query:
- # All but the first base insertion event should point at a fake
- # event, which causes the HS to ask for the state at the start of
- # the chunk later.
- prev_event_ids = [fake_prev_event_id]
- # TODO: Verify the chunk_id_from_query corresponds to an insertion event
- pass
- # Otherwise, create an insertion event to act as a starting point.
- #
- # We don't always have an insertion event to start hanging more history
- # off of (ideally there would be one in the main DAG, but that's not the
- # case if we're wanting to add history to e.g. existing rooms without
- # an insertion event), in which case we just create a new insertion event
- # that can then get pointed to by a "marker" event later.
- else:
- prev_event_ids = prev_events_from_query
-
- base_insertion_event_dict = self._create_insertion_event_dict(
- sender=requester.user.to_string(),
- room_id=room_id,
- origin_server_ts=last_event_in_chunk["origin_server_ts"],
- )
- base_insertion_event_dict["prev_events"] = prev_event_ids.copy()
-
- (
- base_insertion_event,
- _,
- ) = await self.event_creation_handler.create_and_send_nonmember_event(
- await self._create_requester_for_user_id_from_app_service(
- base_insertion_event_dict["sender"],
- requester.app_service,
- ),
- base_insertion_event_dict,
- prev_event_ids=base_insertion_event_dict.get("prev_events"),
- auth_event_ids=auth_event_ids,
- historical=True,
- depth=inherited_depth,
- )
-
- chunk_id_to_connect_to = base_insertion_event["content"][
- EventContentFields.MSC2716_NEXT_CHUNK_ID
- ]
-
- # Connect this current chunk to the insertion event from the previous chunk
- chunk_event = {
- "type": EventTypes.MSC2716_CHUNK,
- "sender": requester.user.to_string(),
- "room_id": room_id,
- "content": {EventContentFields.MSC2716_CHUNK_ID: chunk_id_to_connect_to},
- # Since the chunk event is put at the end of the chunk,
- # where the newest-in-time event is, copy the origin_server_ts from
- # the last event we're inserting
- "origin_server_ts": last_event_in_chunk["origin_server_ts"],
- }
- # Add the chunk event to the end of the chunk (newest-in-time)
- events_to_create.append(chunk_event)
-
- # Add an "insertion" event to the start of each chunk (next to the oldest-in-time
- # event in the chunk) so the next chunk can be connected to this one.
- insertion_event = self._create_insertion_event_dict(
- sender=requester.user.to_string(),
- room_id=room_id,
- # Since the insertion event is put at the start of the chunk,
- # where the oldest-in-time event is, copy the origin_server_ts from
- # the first event we're inserting
- origin_server_ts=events_to_create[0]["origin_server_ts"],
- )
- # Prepend the insertion event to the start of the chunk (oldest-in-time)
- events_to_create = [insertion_event] + events_to_create
-
- event_ids = []
- events_to_persist = []
- for ev in events_to_create:
- assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"])
-
- # Mark all events as historical
- # This has important semantics within the Synapse internals to backfill properly
- ev["content"][EventContentFields.MSC2716_HISTORICAL] = True
-
- event_dict = {
- "type": ev["type"],
- "origin_server_ts": ev["origin_server_ts"],
- "content": ev["content"],
- "room_id": room_id,
- "sender": ev["sender"], # requester.user.to_string(),
- "prev_events": prev_event_ids.copy(),
- }
-
- event, context = await self.event_creation_handler.create_event(
- await self._create_requester_for_user_id_from_app_service(
- ev["sender"], requester.app_service
- ),
- event_dict,
- prev_event_ids=event_dict.get("prev_events"),
- auth_event_ids=auth_event_ids,
- historical=True,
- depth=inherited_depth,
- )
- logger.debug(
- "RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s, auth_event_ids=%s",
- event,
- prev_event_ids,
- auth_event_ids,
- )
-
- assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % (
- event.sender,
- )
-
- events_to_persist.append((event, context))
- event_id = event.event_id
-
- event_ids.append(event_id)
- prev_event_ids = [event_id]
-
- # Persist events in reverse-chronological order so they have the
- # correct stream_ordering as they are backfilled (which decrements).
- # Events are sorted by (topological_ordering, stream_ordering)
- # where topological_ordering is just depth.
- for (event, context) in reversed(events_to_persist):
- ev = await self.event_creation_handler.handle_new_client_event(
- await self._create_requester_for_user_id_from_app_service(
- event["sender"], requester.app_service
- ),
- event=event,
- context=context,
- )
-
- # Add the base_insertion_event to the bottom of the list we return
- if base_insertion_event is not None:
- event_ids.append(base_insertion_event.event_id)
-
- return 200, {
- "state_events": auth_event_ids,
- "events": event_ids,
- "next_chunk_id": insertion_event["content"][
- EventContentFields.MSC2716_NEXT_CHUNK_ID
- ],
- }
-
- def on_GET(self, request, room_id):
- return 501, "Not implemented"
-
- def on_PUT(self, request, room_id):
- return self.txns.fetch_or_execute_request(
- request, self.on_POST, request, room_id
- )
-
-
# TODO: Needs unit testing for room ID + alias joins
-class JoinRoomAliasServlet(TransactionRestServlet):
- def __init__(self, hs):
+class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
- self.room_member_handler = hs.get_room_member_handler()
+        super(ResolveRoomIdMixin, self).__init__(hs)  # ensure TransactionRestServlet is also set up
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
# /join/$room_identifier[/$txn_id]
PATTERNS = "/join/(?P<room_identifier>[^/]*)"
register_txn_path(self, PATTERNS, http_server)
@@ -680,7 +310,7 @@ class JoinRoomAliasServlet(TransactionRestServlet):
request: SynapseRequest,
room_identifier: str,
txn_id: Optional[str] = None,
- ):
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
try:
@@ -690,24 +320,13 @@ class JoinRoomAliasServlet(TransactionRestServlet):
# cheekily send invalid bodies.
content = {}
- if RoomID.is_valid(room_identifier):
- room_id = room_identifier
-
- # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
- args: Dict[bytes, List[bytes]] = request.args # type: ignore
-
- remote_room_hosts = parse_strings_from_args(
- args, "server_name", required=False
- )
- elif RoomAlias.is_valid(room_identifier):
- handler = self.room_member_handler
- room_alias = RoomAlias.from_string(room_identifier)
- room_id_obj, remote_room_hosts = await handler.lookup_room_alias(room_alias)
- room_id = room_id_obj.to_string()
- else:
- raise SynapseError(
- 400, "%s was not legal room ID or room alias" % (room_identifier,)
- )
+ # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
+ args: Dict[bytes, List[bytes]] = request.args # type: ignore
+ remote_room_hosts = parse_strings_from_args(args, "server_name", required=False)
+ room_id, remote_room_hosts = await self.resolve_room_id(
+ room_identifier,
+ remote_room_hosts,
+ )
await self.room_member_handler.update_membership(
requester=requester,
@@ -722,7 +341,9 @@ class JoinRoomAliasServlet(TransactionRestServlet):
return 200, {"room_id": room_id}
- def on_PUT(self, request, room_identifier, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, room_identifier: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
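# ---------------------------------------------------------------------------
# The two super().__init__ calls in JoinRoomAliasServlet above are manual
# cooperative initialisation: super() starts the MRO at ResolveRoomIdMixin,
# and because the mixin's __init__ does not chain onwards,
# super(ResolveRoomIdMixin, self) resumes from the class after it so that
# TransactionRestServlet is initialised too. A toy model of the mechanics
# (the class names below are stand-ins, not Synapse classes):
class Mixin:
    def __init__(self, hs):
        # Does not call super().__init__, just like ResolveRoomIdMixin.
        self.resolver_ready = True


class Base:
    def __init__(self, hs):
        self.hs = hs


class Servlet(Mixin, Base):
    def __init__(self, hs):
        super().__init__(hs)             # runs Mixin.__init__ only
        super(Mixin, self).__init__(hs)  # resumes the MRO at Base


s = Servlet("hs")
assert s.resolver_ready and s.hs == "hs"
# ---------------------------------------------------------------------------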
@@ -734,12 +355,12 @@ class JoinRoomAliasServlet(TransactionRestServlet):
class PublicRoomListRestServlet(TransactionRestServlet):
PATTERNS = client_patterns("/publicRooms$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.hs = hs
self.auth = hs.get_auth()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
server = parse_string(request, "server")
try:
@@ -778,12 +399,9 @@ class PublicRoomListRestServlet(TransactionRestServlet):
Codes.INVALID_PARAM,
)
- try:
- data = await handler.get_remote_public_room_list(
- server, limit=limit, since_token=since_token
- )
- except HttpResponseException as e:
- raise e.to_synapse_error()
+ data = await handler.get_remote_public_room_list(
+ server, limit=limit, since_token=since_token
+ )
else:
data = await handler.get_local_public_room_list(
limit=limit, since_token=since_token
@@ -791,7 +409,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
return 200, data
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await self.auth.get_user_by_req(request, allow_guest=True)
server = parse_string(request, "server")
@@ -831,17 +449,15 @@ class PublicRoomListRestServlet(TransactionRestServlet):
Codes.INVALID_PARAM,
)
- try:
- data = await handler.get_remote_public_room_list(
- server,
- limit=limit,
- since_token=since_token,
- search_filter=search_filter,
- include_all_networks=include_all_networks,
- third_party_instance_id=third_party_instance_id,
- )
- except HttpResponseException as e:
- raise e.to_synapse_error()
+ data = await handler.get_remote_public_room_list(
+ server,
+ limit=limit,
+ since_token=since_token,
+ search_filter=search_filter,
+ include_all_networks=include_all_networks,
+ third_party_instance_id=third_party_instance_id,
+ )
+
else:
data = await handler.get_local_public_room_list(
limit=limit,
@@ -857,13 +473,15 @@ class PublicRoomListRestServlet(TransactionRestServlet):
class RoomMemberListRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/members$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.message_handler = hs.get_message_handler()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
- async def on_GET(self, request, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
# TODO support Pagination stream API (limit/tokens)
requester = await self.auth.get_user_by_req(request, allow_guest=True)
handler = self.message_handler
@@ -909,12 +527,14 @@ class RoomMemberListRestServlet(RestServlet):
class JoinedRoomMemberListRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/joined_members$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.message_handler = hs.get_message_handler()
self.auth = hs.get_auth()
- async def on_GET(self, request, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
users_with_profile = await self.message_handler.get_joined_members(
@@ -928,17 +548,21 @@ class JoinedRoomMemberListRestServlet(RestServlet):
class RoomMessageListRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/messages$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.pagination_handler = hs.get_pagination_handler()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
- async def on_GET(self, request, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
pagination_config = await PaginationConfig.from_request(
self.store, request, default_limit=10
)
+ # Twisted will have processed the args by now.
+ assert request.args is not None
as_client_event = b"raw" not in request.args
filter_str = parse_string(request, "filter", encoding="utf-8")
if filter_str:
@@ -968,12 +592,14 @@ class RoomMessageListRestServlet(RestServlet):
class RoomStateRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/state$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.message_handler = hs.get_message_handler()
self.auth = hs.get_auth()
- async def on_GET(self, request, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, List[JsonDict]]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
# Get all the current state for this room
events = await self.message_handler.get_state_events(
@@ -988,13 +614,15 @@ class RoomStateRestServlet(RestServlet):
class RoomInitialSyncRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/initialSync$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.initial_sync_handler = hs.get_initial_sync_handler()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
- async def on_GET(self, request, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
pagination_config = await PaginationConfig.from_request(self.store, request)
content = await self.initial_sync_handler.room_initial_sync(
@@ -1008,14 +636,16 @@ class RoomEventServlet(RestServlet):
"/rooms/(?P<room_id>[^/]*)/event/(?P<event_id>[^/]*)$", v1=True
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.clock = hs.get_clock()
self.event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer()
self.auth = hs.get_auth()
- async def on_GET(self, request, room_id, event_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str, event_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
try:
event = await self.event_handler.get_event(
@@ -1029,10 +659,10 @@ class RoomEventServlet(RestServlet):
time_now = self.clock.time_msec()
if event:
- event = await self._event_serializer.serialize_event(event, time_now)
- return 200, event
+ event_dict = await self._event_serializer.serialize_event(event, time_now)
+ return 200, event_dict
- return SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
+ raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
class RoomEventContextServlet(RestServlet):
@@ -1040,14 +670,16 @@ class RoomEventContextServlet(RestServlet):
"/rooms/(?P<room_id>[^/]*)/context/(?P<event_id>[^/]*)$", v1=True
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.clock = hs.get_clock()
self.room_context_handler = hs.get_room_context_handler()
self._event_serializer = hs.get_event_client_serializer()
self.auth = hs.get_auth()
- async def on_GET(self, request, room_id, event_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str, event_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
limit = parse_integer(request, "limit", default=10)
@@ -1088,23 +720,27 @@ class RoomEventContextServlet(RestServlet):
class RoomForgetRestServlet(TransactionRestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
PATTERNS = "/rooms/(?P<room_id>[^/]*)/forget"
register_txn_path(self, PATTERNS, http_server)
- async def on_POST(self, request, room_id, txn_id=None):
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str, txn_id: Optional[str] = None
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=False)
await self.room_member_handler.forget(user=requester.user, room_id=room_id)
return 200, {}
- def on_PUT(self, request, room_id, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, room_id: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
@@ -1114,12 +750,12 @@ class RoomForgetRestServlet(TransactionRestServlet):
# TODO: Needs unit testing
class RoomMembershipRestServlet(TransactionRestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
# /rooms/$roomid/[invite|join|leave]
PATTERNS = (
"/rooms/(?P<room_id>[^/]*)/"
@@ -1127,7 +763,13 @@ class RoomMembershipRestServlet(TransactionRestServlet):
)
register_txn_path(self, PATTERNS, http_server)
- async def on_POST(self, request, room_id, membership_action, txn_id=None):
+ async def on_POST(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ membership_action: str,
+ txn_id: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
if requester.is_guest and membership_action not in {
@@ -1190,13 +832,15 @@ class RoomMembershipRestServlet(TransactionRestServlet):
return 200, return_value
- def _has_3pid_invite_keys(self, content):
+ def _has_3pid_invite_keys(self, content: JsonDict) -> bool:
for key in {"id_server", "medium", "address"}:
if key not in content:
return False
return True
- def on_PUT(self, request, room_id, membership_action, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, room_id: str, membership_action: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
@@ -1205,16 +849,22 @@ class RoomMembershipRestServlet(TransactionRestServlet):
class RoomRedactEventRestServlet(TransactionRestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.event_creation_handler = hs.get_event_creation_handler()
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
PATTERNS = "/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)"
register_txn_path(self, PATTERNS, http_server)
- async def on_POST(self, request, room_id, event_id, txn_id=None):
+ async def on_POST(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ event_id: str,
+ txn_id: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
content = parse_json_object_from_request(request)
@@ -1240,7 +890,9 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
set_tag("event_id", event_id)
return 200, {"event_id": event_id}
- def on_PUT(self, request, room_id, event_id, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, room_id: str, event_id: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
@@ -1265,7 +917,9 @@ class RoomTypingRestServlet(RestServlet):
hs.config.worker.writers.typing == hs.get_instance_name()
)
- async def on_PUT(self, request, room_id, user_id):
+ async def on_PUT(
+ self, request: SynapseRequest, room_id: str, user_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if not self._is_typing_writer:
@@ -1316,7 +970,9 @@ class RoomAliasListServlet(RestServlet):
self.auth = hs.get_auth()
self.directory_handler = hs.get_directory_handler()
- async def on_GET(self, request, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
alias_list = await self.directory_handler.get_aliases_for_room(
@@ -1329,12 +985,12 @@ class RoomAliasListServlet(RestServlet):
class SearchRestServlet(RestServlet):
PATTERNS = client_patterns("/search$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.search_handler = hs.get_search_handler()
self.auth = hs.get_auth()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
content = parse_json_object_from_request(request)
@@ -1348,19 +1004,24 @@ class SearchRestServlet(RestServlet):
class JoinedRoomsRestServlet(RestServlet):
PATTERNS = client_patterns("/joined_rooms$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.store = hs.get_datastore()
self.auth = hs.get_auth()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
room_ids = await self.store.get_rooms_for_user(requester.user.to_string())
return 200, {"joined_rooms": list(room_ids)}
-def register_txn_path(servlet, regex_string, http_server, with_get=False):
+def register_txn_path(
+ servlet: RestServlet,
+ regex_string: str,
+ http_server: HttpServer,
+ with_get: bool = False,
+) -> None:
"""Registers a transaction-based path.
This registers two paths:
@@ -1368,28 +1029,37 @@ def register_txn_path(servlet, regex_string, http_server, with_get=False):
POST regex_string
Args:
- regex_string (str): The regex string to register. Must NOT have a
- trailing $ as this string will be appended to.
- http_server : The http_server to register paths with.
+ servlet: The servlet whose on_POST/on_PUT (and optionally on_GET)
+ handlers are registered on the given paths.
+ regex_string: The regex string to register. Must NOT have a
+ trailing $ as this string will be appended to.
+ http_server: The http_server to register paths with.
with_get: True to also register respective GET paths for the PUTs.
"""
+ on_POST = getattr(servlet, "on_POST", None)
+ on_PUT = getattr(servlet, "on_PUT", None)
+ if on_POST is None or on_PUT is None:
+ raise RuntimeError("on_POST and on_PUT must exist when using register_txn_path")
http_server.register_paths(
"POST",
client_patterns(regex_string + "$", v1=True),
- servlet.on_POST,
+ on_POST,
servlet.__class__.__name__,
)
http_server.register_paths(
"PUT",
client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
- servlet.on_PUT,
+ on_PUT,
servlet.__class__.__name__,
)
+ on_GET = getattr(servlet, "on_GET", None)
if with_get:
+ if on_GET is None:
+ raise RuntimeError(
+ "register_txn_path called with with_get = True, but no on_GET method exists"
+ )
http_server.register_paths(
"GET",
client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
- servlet.on_GET,
+ on_GET,
servlet.__class__.__name__,
)
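+
+
+ # Illustrative sketch only (hypothetical servlet name; it mirrors the real
+ # callers above, e.g. RoomRedactEventRestServlet):
+ #
+ #     class ExampleTxnServlet(TransactionRestServlet):
+ #         def register(self, http_server: HttpServer) -> None:
+ #             register_txn_path(
+ #                 self, "/rooms/(?P<room_id>[^/]*)/example", http_server
+ #             )
+ #
+ #         async def on_POST(self, request, room_id, txn_id=None):
+ #             return 200, {}
+ #
+ #         def on_PUT(self, request, room_id, txn_id):
+ #             # Replaying a PUT with the same txn_id returns the cached response.
+ #             return self.txns.fetch_or_execute_request(
+ #                 request, self.on_POST, request, room_id, txn_id
+ #             )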
@@ -1405,18 +1075,26 @@ class RoomSpaceSummaryRestServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
self._auth = hs.get_auth()
- self._space_summary_handler = hs.get_space_summary_handler()
+ self._room_summary_handler = hs.get_room_summary_handler()
async def on_GET(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self._auth.get_user_by_req(request, allow_guest=True)
- return 200, await self._space_summary_handler.get_space_summary(
+ max_rooms_per_space = parse_integer(request, "max_rooms_per_space")
+ if max_rooms_per_space is not None and max_rooms_per_space < 0:
+ raise SynapseError(
+ 400,
+ "Value for 'max_rooms_per_space' must be a non-negative integer",
+ Codes.BAD_JSON,
+ )
+
+ return 200, await self._room_summary_handler.get_space_summary(
requester.user.to_string(),
room_id,
suggested_only=parse_boolean(request, "suggested_only", default=False),
- max_rooms_per_space=parse_integer(request, "max_rooms_per_space"),
+ max_rooms_per_space=max_rooms_per_space,
)
# TODO When switching to the stable endpoint, remove the POST handler.
@@ -1433,12 +1111,19 @@ class RoomSpaceSummaryRestServlet(RestServlet):
)
max_rooms_per_space = content.get("max_rooms_per_space")
- if max_rooms_per_space is not None and not isinstance(max_rooms_per_space, int):
- raise SynapseError(
- 400, "'max_rooms_per_space' must be an integer", Codes.BAD_JSON
- )
+ if max_rooms_per_space is not None:
+ if not isinstance(max_rooms_per_space, int):
+ raise SynapseError(
+ 400, "'max_rooms_per_space' must be an integer", Codes.BAD_JSON
+ )
+ if max_rooms_per_space < 0:
+ raise SynapseError(
+ 400,
+ "Value for 'max_rooms_per_space' must be a non-negative integer",
+ Codes.BAD_JSON,
+ )
- return 200, await self._space_summary_handler.get_space_summary(
+ return 200, await self._room_summary_handler.get_space_summary(
requester.user.to_string(),
room_id,
suggested_only=suggested_only,
@@ -1446,9 +1131,87 @@ class RoomSpaceSummaryRestServlet(RestServlet):
)
-def register_servlets(hs: "HomeServer", http_server, is_worker=False):
- msc2716_enabled = hs.config.experimental.msc2716_enabled
+class RoomHierarchyRestServlet(RestServlet):
+ PATTERNS = (
+ re.compile(
+ "^/_matrix/client/unstable/org.matrix.msc2946"
+ "/rooms/(?P<room_id>[^/]*)/hierarchy$"
+ ),
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self._auth = hs.get_auth()
+ self._room_summary_handler = hs.get_room_summary_handler()
+
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
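+ # Example request (illustrative values):
+ #   GET /_matrix/client/unstable/org.matrix.msc2946
+ #       /rooms/!space:example.org/hierarchy?suggested_only=true&max_depth=2&limit=50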
+ requester = await self._auth.get_user_by_req(request, allow_guest=True)
+
+ max_depth = parse_integer(request, "max_depth")
+ if max_depth is not None and max_depth < 0:
+ raise SynapseError(
+ 400, "'max_depth' must be a non-negative integer", Codes.BAD_JSON
+ )
+
+ limit = parse_integer(request, "limit")
+ if limit is not None and limit <= 0:
+ raise SynapseError(
+ 400, "'limit' must be a positive integer", Codes.BAD_JSON
+ )
+
+ return 200, await self._room_summary_handler.get_room_hierarchy(
+ requester.user.to_string(),
+ room_id,
+ suggested_only=parse_boolean(request, "suggested_only", default=False),
+ max_depth=max_depth,
+ limit=limit,
+ from_token=parse_string(request, "from"),
+ )
+
+
+class RoomSummaryRestServlet(ResolveRoomIdMixin, RestServlet):
+ PATTERNS = (
+ re.compile(
+ "^/_matrix/client/unstable/im.nheko.summary"
+ "/rooms/(?P<room_identifier>[^/]*)/summary$"
+ ),
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+ self._auth = hs.get_auth()
+ self._room_summary_handler = hs.get_room_summary_handler()
+
+ async def on_GET(
+ self, request: SynapseRequest, room_identifier: str
+ ) -> Tuple[int, JsonDict]:
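+ # Example request (illustrative; `room_identifier` may be a room ID or an
+ # alias, and `via` lists candidate servers for resolving it):
+ #   GET /_matrix/client/unstable/im.nheko.summary
+ #       /rooms/%23room%3Aexample.org/summary?via=example.org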
+ try:
+ requester = await self._auth.get_user_by_req(request, allow_guest=True)
+ requester_user_id: Optional[str] = requester.user.to_string()
+ except MissingClientTokenError:
+ # auth is optional
+ requester_user_id = None
+
+ # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
+ args: Dict[bytes, List[bytes]] = request.args # type: ignore
+ remote_room_hosts = parse_strings_from_args(args, "via", required=False)
+ room_id, remote_room_hosts = await self.resolve_room_id(
+ room_identifier,
+ remote_room_hosts,
+ )
+
+ return 200, await self._room_summary_handler.get_room_summary(
+ requester_user_id,
+ room_id,
+ remote_room_hosts,
+ )
+
+def register_servlets(
+ hs: "HomeServer", http_server: HttpServer, is_worker: bool = False
+) -> None:
RoomStateEventRestServlet(hs).register(http_server)
RoomMemberListRestServlet(hs).register(http_server)
JoinedRoomMemberListRestServlet(hs).register(http_server)
@@ -1456,24 +1219,25 @@ def register_servlets(hs: "HomeServer", http_server, is_worker=False):
JoinRoomAliasServlet(hs).register(http_server)
RoomMembershipRestServlet(hs).register(http_server)
RoomSendEventRestServlet(hs).register(http_server)
- if msc2716_enabled:
- RoomBatchSendEventRestServlet(hs).register(http_server)
PublicRoomListRestServlet(hs).register(http_server)
RoomStateRestServlet(hs).register(http_server)
RoomRedactEventRestServlet(hs).register(http_server)
RoomTypingRestServlet(hs).register(http_server)
RoomEventContextServlet(hs).register(http_server)
RoomSpaceSummaryRestServlet(hs).register(http_server)
+ RoomHierarchyRestServlet(hs).register(http_server)
+ if hs.config.experimental.msc3266_enabled:
+ RoomSummaryRestServlet(hs).register(http_server)
RoomEventServlet(hs).register(http_server)
JoinedRoomsRestServlet(hs).register(http_server)
RoomAliasListServlet(hs).register(http_server)
SearchRestServlet(hs).register(http_server)
+ RoomCreateRestServlet(hs).register(http_server)
# Some servlets only get registered for the main process.
if not is_worker:
- RoomCreateRestServlet(hs).register(http_server)
RoomForgetRestServlet(hs).register(http_server)
-def register_deprecated_servlets(hs, http_server):
+def register_deprecated_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RoomInitialSyncRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py
new file mode 100644
index 0000000000..ed96978448
--- /dev/null
+++ b/synapse/rest/client/room_batch.py
@@ -0,0 +1,454 @@
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import re
+from typing import TYPE_CHECKING, Awaitable, List, Tuple
+
+from twisted.web.server import Request
+
+from synapse.api.constants import EventContentFields, EventTypes
+from synapse.api.errors import AuthError, Codes, SynapseError
+from synapse.appservice import ApplicationService
+from synapse.http.server import HttpServer
+from synapse.http.servlet import (
+ RestServlet,
+ assert_params_in_dict,
+ parse_json_object_from_request,
+ parse_string,
+ parse_strings_from_args,
+)
+from synapse.http.site import SynapseRequest
+from synapse.rest.client.transactions import HttpTransactionCache
+from synapse.types import JsonDict, Requester, UserID, create_requester
+from synapse.util.stringutils import random_string
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class RoomBatchSendEventRestServlet(RestServlet):
+ """
+ API endpoint which can insert a chunk of events historically back in time
+ next to the given `prev_event`.
+
+ `chunk_id` comes from `next_chunk_id` in the response of the batch send
+ endpoint and is derived from the "insertion" events added to each chunk.
+ It's not required for the first batch send.
+
+ `state_events_at_start` is used to define the historical state events
+ needed to auth the events, such as join events. These events will float
+ outside of the normal DAG as outliers and won't be visible in the chat
+ history, which also allows us to insert multiple chunks without having a
+ bunch of `@mxid joined the room` noise between each chunk.
+
+ `events` is a chronological chunk/list of events you want to insert.
+ There is a reverse-chronological constraint on chunks, so once you insert
+ some messages you can only insert older ones after that.
+ tl;dr: insert chunks from your most recent history -> oldest history.
+
+ POST /_matrix/client/unstable/org.matrix.msc2716/rooms/<roomID>/batch_send?prev_event=<eventID>&chunk_id=<chunkID>
+ {
+ "events": [ ... ],
+ "state_events_at_start": [ ... ]
+ }
+ """
+
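+ # Sketch of the intended client flow (values illustrative): the first call
+ # omits `chunk_id`; each response's `next_chunk_id` is then passed as
+ # `chunk_id` for the next (older) chunk:
+ #
+ #   POST .../batch_send?prev_event=$abc             -> {"next_chunk_id": "n1", ...}
+ #   POST .../batch_send?prev_event=$abc&chunk_id=n1 -> {"next_chunk_id": "n2", ...}
+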
+ PATTERNS = (
+ re.compile(
+ "^/_matrix/client/unstable/org.matrix.msc2716"
+ "/rooms/(?P<room_id>[^/]*)/batch_send$"
+ ),
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.hs = hs
+ self.store = hs.get_datastore()
+ self.state_store = hs.get_storage().state
+ self.event_creation_handler = hs.get_event_creation_handler()
+ self.room_member_handler = hs.get_room_member_handler()
+ self.auth = hs.get_auth()
+ self.txns = HttpTransactionCache(hs)
+
+ async def _inherit_depth_from_prev_ids(self, prev_event_ids: List[str]) -> int:
+ (
+ most_recent_prev_event_id,
+ most_recent_prev_event_depth,
+ ) = await self.store.get_max_depth_of(prev_event_ids)
+
+ # We want to insert the historical event after the `prev_event` but before the successor event.
+ #
+ # We inherit depth from the successor event instead of the `prev_event`
+ # because events returned from `/messages` are sorted first by
+ # `topological_ordering` (which is just the `depth`) and then tie-broken by
+ # `stream_ordering`.
+ #
+ # We mark these inserted historical events as "backfilled", which gives them
+ # a negative `stream_ordering`. If we used the same depth as the
+ # `prev_event`, our historical event would tie-break and be sorted before
+ # the `prev_event` when it should come after it.
+ #
+ # Using the successor event's depth makes our events appear after
+ # `prev_event` (they have a larger `depth`) but before the successor event
+ # (they tie on `depth`, and their negative `stream_ordering` sorts them
+ # earlier).
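+ #
+ # Worked example (hypothetical depths): if `prev_event` has depth 10 and
+ # its successor has depth 11, the historical events are created at depth
+ # 11; being backfilled they get negative stream_orderings, so they sort
+ # after `prev_event` (10 < 11) but before the successor (equal depth,
+ # smaller stream_ordering).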
+ successor_event_ids = await self.store.get_successor_events(
+ [most_recent_prev_event_id]
+ )
+
+ # If we can't find any successor events, then it's a forward extremity of
+ # historical messages and we can just inherit from the previous historical
+ # event, which we can already assume has the correct depth for where we
+ # want to insert.
+ if not successor_event_ids:
+ depth = most_recent_prev_event_depth
+ else:
+ (
+ _,
+ oldest_successor_depth,
+ ) = await self.store.get_min_depth_of(successor_event_ids)
+
+ depth = oldest_successor_depth
+
+ return depth
+
+ def _create_insertion_event_dict(
+ self, sender: str, room_id: str, origin_server_ts: int
+ ) -> JsonDict:
+ """Creates an event dict for an "insertion" event with the proper fields
+ and a random chunk ID.
+
+ Args:
+ sender: The event author MXID
+ room_id: The room ID that the event belongs to
+ origin_server_ts: Timestamp when the event was sent
+
+ Returns:
+ The new event dictionary to insert.
+ """
+
+ next_chunk_id = random_string(8)
+ insertion_event = {
+ "type": EventTypes.MSC2716_INSERTION,
+ "sender": sender,
+ "room_id": room_id,
+ "content": {
+ EventContentFields.MSC2716_NEXT_CHUNK_ID: next_chunk_id,
+ EventContentFields.MSC2716_HISTORICAL: True,
+ },
+ "origin_server_ts": origin_server_ts,
+ }
+
+ return insertion_event
+
+ async def _create_requester_for_user_id_from_app_service(
+ self, user_id: str, app_service: ApplicationService
+ ) -> Requester:
+ """Creates a new requester for the given user_id
+ and validates that the app service is allowed to control
+ the given user.
+
+ Args:
+ user_id: The author MXID that the app service is controlling
+ app_service: The app service that controls the user
+
+ Returns:
+ Requester object
+ """
+
+ await self.auth.validate_appservice_can_control_user_id(app_service, user_id)
+
+ return create_requester(user_id, app_service=app_service)
+
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=False)
+
+ if not requester.app_service:
+ raise AuthError(
+ 403,
+ "Only application services can use the /batchsend endpoint",
+ )
+
+ body = parse_json_object_from_request(request)
+ assert_params_in_dict(body, ["state_events_at_start", "events"])
+
+ assert request.args is not None
+ prev_events_from_query = parse_strings_from_args(request.args, "prev_event")
+ chunk_id_from_query = parse_string(request, "chunk_id")
+
+ if prev_events_from_query is None:
+ raise SynapseError(
+ 400,
+ "prev_event query parameter is required when inserting historical messages back in time",
+ errcode=Codes.MISSING_PARAM,
+ )
+
+ # For the event we are inserting next to (`prev_events_from_query`),
+ # find the most recent auth events (derived from state events) that
+ # allowed that message to be sent. We will use that as a base
+ # to auth our historical messages against.
+ (
+ most_recent_prev_event_id,
+ _,
+ ) = await self.store.get_max_depth_of(prev_events_from_query)
+ # mapping from (type, state_key) -> state_event_id
+ prev_state_map = await self.state_store.get_state_ids_for_event(
+ most_recent_prev_event_id
+ )
+ # List of state event IDs
+ prev_state_ids = list(prev_state_map.values())
+ auth_event_ids = prev_state_ids
+
+ state_events_at_start = []
+ for state_event in body["state_events_at_start"]:
+ assert_params_in_dict(
+ state_event, ["type", "origin_server_ts", "content", "sender"]
+ )
+
+ logger.debug(
+ "RoomBatchSendEventRestServlet inserting state_event=%s, auth_event_ids=%s",
+ state_event,
+ auth_event_ids,
+ )
+
+ event_dict = {
+ "type": state_event["type"],
+ "origin_server_ts": state_event["origin_server_ts"],
+ "content": state_event["content"],
+ "room_id": room_id,
+ "sender": state_event["sender"],
+ "state_key": state_event["state_key"],
+ }
+
+ # Mark all events as historical
+ event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
+
+ # Make the state events float off on their own
+ fake_prev_event_id = "$" + random_string(43)
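+ # (Illustration, assuming modern room versions where an event ID is "$"
+ # followed by 43 characters: the fake ID is shaped like a real event ID
+ # but deliberately references nothing in the DAG.)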
+
+ # TODO: This is pretty much the same as some other code in this file that handles inserting state
+ if event_dict["type"] == EventTypes.Member:
+ membership = event_dict["content"].get("membership", None)
+ event_id, _ = await self.room_member_handler.update_membership(
+ await self._create_requester_for_user_id_from_app_service(
+ state_event["sender"], requester.app_service
+ ),
+ target=UserID.from_string(event_dict["state_key"]),
+ room_id=room_id,
+ action=membership,
+ content=event_dict["content"],
+ outlier=True,
+ prev_event_ids=[fake_prev_event_id],
+ # Make sure to use a copy of this list because we modify it
+ # later in the loop. Otherwise it would be the same reference
+ # and the event would also be updated when we append later.
+ auth_event_ids=auth_event_ids.copy(),
+ )
+ else:
+ # TODO: Add some Complement tests that add state that is not member joins
+ # and will use this code path. Maybe we only want to support join state events
+ # and can get rid of this `else`?
+ (
+ event,
+ _,
+ ) = await self.event_creation_handler.create_and_send_nonmember_event(
+ await self._create_requester_for_user_id_from_app_service(
+ state_event["sender"], requester.app_service
+ ),
+ event_dict,
+ outlier=True,
+ prev_event_ids=[fake_prev_event_id],
+ # Make sure to use a copy of this list because we modify it
+ # later in the loop. Otherwise it would be the same reference
+ # and the event would also be updated when we append later.
+ auth_event_ids=auth_event_ids.copy(),
+ )
+ event_id = event.event_id
+
+ state_events_at_start.append(event_id)
+ auth_event_ids.append(event_id)
+
+ events_to_create = body["events"]
+
+ inherited_depth = await self._inherit_depth_from_prev_ids(
+ prev_events_from_query
+ )
+
+ # Figure out which chunk to connect to. If they passed in
+ # chunk_id_from_query, let's use it. The chunk ID passed in comes
+ # from the chunk_id in the "insertion" event from the previous chunk.
+ last_event_in_chunk = events_to_create[-1]
+ chunk_id_to_connect_to = chunk_id_from_query
+ base_insertion_event = None
+ if chunk_id_from_query:
+ # All but the first base insertion event should point at a fake
+ # event, which causes the HS to ask for the state at the start of
+ # the chunk later.
+ prev_event_ids = [fake_prev_event_id]
+ # TODO: Verify the chunk_id_from_query corresponds to an insertion event
+ # Otherwise, create an insertion event to act as a starting point.
+ #
+ # We don't always have an insertion event to start hanging more history
+ # off of (ideally there would be one in the main DAG, but that's not the
+ # case if we want to add history to e.g. existing rooms without an
+ # insertion event), in which case we just create a new insertion event
+ # that can then get pointed to by a "marker" event later.
+ else:
+ prev_event_ids = prev_events_from_query
+
+ base_insertion_event_dict = self._create_insertion_event_dict(
+ sender=requester.user.to_string(),
+ room_id=room_id,
+ origin_server_ts=last_event_in_chunk["origin_server_ts"],
+ )
+ base_insertion_event_dict["prev_events"] = prev_event_ids.copy()
+
+ (
+ base_insertion_event,
+ _,
+ ) = await self.event_creation_handler.create_and_send_nonmember_event(
+ await self._create_requester_for_user_id_from_app_service(
+ base_insertion_event_dict["sender"],
+ requester.app_service,
+ ),
+ base_insertion_event_dict,
+ prev_event_ids=base_insertion_event_dict.get("prev_events"),
+ auth_event_ids=auth_event_ids,
+ historical=True,
+ depth=inherited_depth,
+ )
+
+ chunk_id_to_connect_to = base_insertion_event["content"][
+ EventContentFields.MSC2716_NEXT_CHUNK_ID
+ ]
+
+ # Connect this current chunk to the insertion event from the previous chunk
+ chunk_event = {
+ "type": EventTypes.MSC2716_CHUNK,
+ "sender": requester.user.to_string(),
+ "room_id": room_id,
+ "content": {
+ EventContentFields.MSC2716_CHUNK_ID: chunk_id_to_connect_to,
+ EventContentFields.MSC2716_HISTORICAL: True,
+ },
+ # Since the chunk event is put at the end of the chunk,
+ # where the newest-in-time event is, copy the origin_server_ts from
+ # the last event we're inserting
+ "origin_server_ts": last_event_in_chunk["origin_server_ts"],
+ }
+ # Add the chunk event to the end of the chunk (newest-in-time)
+ events_to_create.append(chunk_event)
+
+ # Add an "insertion" event to the start of each chunk (next to the oldest-in-time
+ # event in the chunk) so the next chunk can be connected to this one.
+ insertion_event = self._create_insertion_event_dict(
+ sender=requester.user.to_string(),
+ room_id=room_id,
+ # Since the insertion event is put at the start of the chunk,
+ # where the oldest-in-time event is, copy the origin_server_ts from
+ # the first event we're inserting
+ origin_server_ts=events_to_create[0]["origin_server_ts"],
+ )
+ # Prepend the insertion event to the start of the chunk (oldest-in-time)
+ events_to_create = [insertion_event] + events_to_create
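+ # Resulting chunk layout, oldest -> newest (illustrative):
+ #   [insertion_event, historical_event_1, ..., historical_event_n, chunk_event]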
+
+ event_ids = []
+ events_to_persist = []
+ for ev in events_to_create:
+ assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"])
+
+ event_dict = {
+ "type": ev["type"],
+ "origin_server_ts": ev["origin_server_ts"],
+ "content": ev["content"],
+ "room_id": room_id,
+ "sender": ev["sender"], # requester.user.to_string(),
+ "prev_events": prev_event_ids.copy(),
+ }
+
+ # Mark all events as historical
+ event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
+
+ event, context = await self.event_creation_handler.create_event(
+ await self._create_requester_for_user_id_from_app_service(
+ ev["sender"], requester.app_service
+ ),
+ event_dict,
+ prev_event_ids=event_dict.get("prev_events"),
+ auth_event_ids=auth_event_ids,
+ historical=True,
+ depth=inherited_depth,
+ )
+ logger.debug(
+ "RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s, auth_event_ids=%s",
+ event,
+ prev_event_ids,
+ auth_event_ids,
+ )
+
+ assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % (
+ event.sender,
+ )
+
+ events_to_persist.append((event, context))
+ event_id = event.event_id
+
+ event_ids.append(event_id)
+ prev_event_ids = [event_id]
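+ # Each created event becomes the sole prev_event of the next one created,
+ # chaining the chunk in order: the insertion event, then each historical
+ # event, then finally the chunk event.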
+
+ # Persist events in reverse-chronological order so they have the
+ # correct stream_ordering as they are backfilled (which decrements).
+ # Events are sorted by (topological_ordering, stream_ordering)
+ # where topological_ordering is just depth.
+ for (event, context) in reversed(events_to_persist):
+ await self.event_creation_handler.handle_new_client_event(
+ await self._create_requester_for_user_id_from_app_service(
+ event["sender"], requester.app_service
+ ),
+ event=event,
+ context=context,
+ )
+
+ # Add the base_insertion_event to the bottom of the list we return
+ if base_insertion_event is not None:
+ event_ids.append(base_insertion_event.event_id)
+
+ return 200, {
+ "state_events": state_events_at_start,
+ "events": event_ids,
+ "next_chunk_id": insertion_event["content"][
+ EventContentFields.MSC2716_NEXT_CHUNK_ID
+ ],
+ }
+
+ def on_GET(self, request: Request, room_id: str) -> Tuple[int, str]:
+ return 501, "Not implemented"
+
+ def on_PUT(
+ self, request: SynapseRequest, room_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
+ return self.txns.fetch_or_execute_request(
+ request, self.on_POST, request, room_id
+ )
+
+
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ msc2716_enabled = hs.config.experimental.msc2716_enabled
+
+ if msc2716_enabled:
+ RoomBatchSendEventRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/room_keys.py
index 263596be86..37e39570f6 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/room_keys.py
@@ -13,16 +13,23 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Optional, Tuple
from synapse.api.errors import Codes, NotFoundError, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
parse_json_object_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -31,16 +38,14 @@ class RoomKeysServlet(RestServlet):
"/room_keys/keys(/(?P<room_id>[^/]+))?(/(?P<session_id>[^/]+))?$"
)
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
- async def on_PUT(self, request, room_id, session_id):
+ async def on_PUT(
+ self, request: SynapseRequest, room_id: Optional[str], session_id: Optional[str]
+ ) -> Tuple[int, JsonDict]:
"""
Uploads one or more encrypted E2E room keys for backup purposes.
room_id: the ID of the room the keys are for (optional)
@@ -133,7 +138,9 @@ class RoomKeysServlet(RestServlet):
ret = await self.e2e_room_keys_handler.upload_room_keys(user_id, version, body)
return 200, ret
- async def on_GET(self, request, room_id, session_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: Optional[str], session_id: Optional[str]
+ ) -> Tuple[int, JsonDict]:
"""
Retrieves one or more encrypted E2E room keys for backup purposes.
Symmetric with the PUT version of the API.
@@ -215,7 +222,9 @@ class RoomKeysServlet(RestServlet):
return 200, room_keys
- async def on_DELETE(self, request, room_id, session_id):
+ async def on_DELETE(
+ self, request: SynapseRequest, room_id: Optional[str], session_id: Optional[str]
+ ) -> Tuple[int, JsonDict]:
"""
Deletes one or more encrypted E2E room keys for a user for backup purposes.
@@ -242,16 +251,12 @@ class RoomKeysServlet(RestServlet):
class RoomKeysNewVersionServlet(RestServlet):
PATTERNS = client_patterns("/room_keys/version$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
"""
Create a new backup version for this user's room_keys with the given
info. The version is allocated by the server and returned to the user
@@ -295,16 +300,14 @@ class RoomKeysNewVersionServlet(RestServlet):
class RoomKeysVersionServlet(RestServlet):
PATTERNS = client_patterns("/room_keys/version(/(?P<version>[^/]+))?$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
- async def on_GET(self, request, version):
+ async def on_GET(
+ self, request: SynapseRequest, version: Optional[str]
+ ) -> Tuple[int, JsonDict]:
"""
Retrieve the version information about a given version of the user's
room_keys backup. If the version part is missing, returns info about the
@@ -332,7 +335,9 @@ class RoomKeysVersionServlet(RestServlet):
raise SynapseError(404, "No backup found", Codes.NOT_FOUND)
return 200, info
- async def on_DELETE(self, request, version):
+ async def on_DELETE(
+ self, request: SynapseRequest, version: Optional[str]
+ ) -> Tuple[int, JsonDict]:
"""
Delete the information about a given version of the user's
room_keys backup. If the version part is missing, deletes the most
@@ -351,7 +356,9 @@ class RoomKeysVersionServlet(RestServlet):
await self.e2e_room_keys_handler.delete_version(user_id, version)
return 200, {}
- async def on_PUT(self, request, version):
+ async def on_PUT(
+ self, request: SynapseRequest, version: Optional[str]
+ ) -> Tuple[int, JsonDict]:
"""
Update the information about a given version of the user's room_keys backup.
@@ -385,7 +392,7 @@ class RoomKeysVersionServlet(RestServlet):
return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RoomKeysServlet(hs).register(http_server)
RoomKeysVersionServlet(hs).register(http_server)
RoomKeysNewVersionServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/room_upgrade_rest_servlet.py
index 6d1b083acb..6a7792e18b 100644
--- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/room_upgrade_rest_servlet.py
@@ -13,18 +13,25 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import Codes, ShadowBanError, SynapseError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
)
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from synapse.util import stringutils
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -41,9 +48,6 @@ class RoomUpgradeRestServlet(RestServlet):
}
Creates a new room and shuts down the old one. Returns the ID of the new room.
-
- Args:
- hs (synapse.server.HomeServer):
"""
PATTERNS = client_patterns(
@@ -51,13 +55,15 @@ class RoomUpgradeRestServlet(RestServlet):
"/rooms/(?P<room_id>[^/]*)/upgrade$"
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self._hs = hs
self._room_creation_handler = hs.get_room_creation_handler()
self._auth = hs.get_auth()
- async def on_POST(self, request, room_id):
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self._auth.get_user_by_req(request)
content = parse_json_object_from_request(request)
@@ -84,5 +90,5 @@ class RoomUpgradeRestServlet(RestServlet):
return 200, ret
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RoomUpgradeRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/sendtodevice.py
index d537d811d8..3322c8ef48 100644
--- a/synapse/rest/client/v2_alpha/sendtodevice.py
+++ b/synapse/rest/client/sendtodevice.py
@@ -13,15 +13,21 @@
# limitations under the License.
import logging
-from typing import Tuple
+from typing import TYPE_CHECKING, Awaitable, Tuple
from synapse.http import servlet
+from synapse.http.server import HttpServer
from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import set_tag, trace
from synapse.rest.client.transactions import HttpTransactionCache
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -30,11 +36,7 @@ class SendToDeviceRestServlet(servlet.RestServlet):
"/sendToDevice/(?P<message_type>[^/]*)/(?P<txn_id>[^/]*)$"
)
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
@@ -42,14 +44,18 @@ class SendToDeviceRestServlet(servlet.RestServlet):
self.device_message_handler = hs.get_device_message_handler()
@trace(opname="sendToDevice")
- def on_PUT(self, request, message_type, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, message_type: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("message_type", message_type)
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
request, self._put, request, message_type, txn_id
)
- async def _put(self, request, message_type, txn_id):
+ async def _put(
+ self, request: SynapseRequest, message_type: str, txn_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
content = parse_json_object_from_request(request)
@@ -59,9 +65,8 @@ class SendToDeviceRestServlet(servlet.RestServlet):
requester, message_type, content["messages"]
)
- response: Tuple[int, dict] = (200, {})
- return response
+ return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
SendToDeviceRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/shared_rooms.py b/synapse/rest/client/shared_rooms.py
index d2e7f04b40..1d90493eb0 100644
--- a/synapse/rest/client/v2_alpha/shared_rooms.py
+++ b/synapse/rest/client/shared_rooms.py
@@ -12,13 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import Codes, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
-from synapse.types import UserID
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict, UserID
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -32,13 +38,15 @@ class UserSharedRoomsServlet(RestServlet):
releases=(), # This is an unstable feature
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.user_directory_active = hs.config.update_user_directory
- async def on_GET(self, request, user_id):
+ async def on_GET(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
if not self.user_directory_active:
raise SynapseError(
@@ -63,5 +71,5 @@ class UserSharedRoomsServlet(RestServlet):
return 200, {"joined": list(rooms)}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
UserSharedRoomsServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/sync.py
index e321668698..1259058b9b 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/sync.py
@@ -14,17 +14,38 @@
import itertools
import logging
from collections import defaultdict
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Awaitable,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Tuple,
+ Union,
+)
from synapse.api.constants import Membership, PresenceState
from synapse.api.errors import Codes, StoreError, SynapseError
from synapse.api.filtering import DEFAULT_FILTER_COLLECTION, FilterCollection
+from synapse.api.presence import UserPresenceState
+from synapse.events import EventBase
from synapse.events.utils import (
format_event_for_client_v2_without_room_id,
format_event_raw,
)
from synapse.handlers.presence import format_user_presence_state
-from synapse.handlers.sync import KnockedSyncResult, SyncConfig
+from synapse.handlers.sync import (
+ ArchivedSyncResult,
+ InvitedSyncResult,
+ JoinedSyncResult,
+ KnockedSyncResult,
+ SyncConfig,
+ SyncResult,
+)
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.types import JsonDict, StreamToken
@@ -192,6 +213,8 @@ class SyncRestServlet(RestServlet):
return 200, {}
time_now = self.clock.time_msec()
+ # We know that the requester has an access token since appservices
+ # cannot use sync.
response_content = await self.encode_response(
time_now, sync_result, requester.access_token_id, filter_collection
)
@@ -199,7 +222,13 @@ class SyncRestServlet(RestServlet):
logger.debug("Event formatting complete")
return 200, response_content
- async def encode_response(self, time_now, sync_result, access_token_id, filter):
+ async def encode_response(
+ self,
+ time_now: int,
+ sync_result: SyncResult,
+ access_token_id: Optional[int],
+ filter: FilterCollection,
+ ) -> JsonDict:
logger.debug("Formatting events in sync response")
if filter.event_format == "client":
event_formatter = format_event_for_client_v2_without_room_id
@@ -234,7 +263,7 @@ class SyncRestServlet(RestServlet):
logger.debug("building sync response dict")
- response: dict = defaultdict(dict)
+ response: JsonDict = defaultdict(dict)
response["next_batch"] = await sync_result.next_batch.to_string(self.store)
if sync_result.account_data:
@@ -259,10 +288,11 @@ class SyncRestServlet(RestServlet):
# Corresponding synapse issue: https://github.com/matrix-org/synapse/issues/10456
response["device_one_time_keys_count"] = sync_result.device_one_time_keys_count
- if sync_result.device_unused_fallback_key_types:
- response[
- "org.matrix.msc2732.device_unused_fallback_key_types"
- ] = sync_result.device_unused_fallback_key_types
+ # https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md
+ # states that this field should always be included, as long as the server supports the feature.
+ response[
+ "org.matrix.msc2732.device_unused_fallback_key_types"
+ ] = sync_result.device_unused_fallback_key_types
if joined:
response["rooms"][Membership.JOIN] = joined
@@ -273,6 +303,8 @@ class SyncRestServlet(RestServlet):
if archived:
response["rooms"][Membership.LEAVE] = archived
+ # By the time we get here, groups is no longer optional.
+ assert sync_result.groups is not None
if sync_result.groups.join:
response["groups"][Membership.JOIN] = sync_result.groups.join
if sync_result.groups.invite:
@@ -283,7 +315,7 @@ class SyncRestServlet(RestServlet):
return response
@staticmethod
- def encode_presence(events, time_now):
+ def encode_presence(events: List[UserPresenceState], time_now: int) -> JsonDict:
return {
"events": [
{
@@ -298,25 +330,27 @@ class SyncRestServlet(RestServlet):
}
async def encode_joined(
- self, rooms, time_now, token_id, event_fields, event_formatter
- ):
+ self,
+ rooms: List[JoinedSyncResult],
+ time_now: int,
+ token_id: Optional[int],
+ event_fields: List[str],
+ event_formatter: Callable[[JsonDict], JsonDict],
+ ) -> JsonDict:
"""
Encode the joined rooms in a sync result
Args:
- rooms(list[synapse.handlers.sync.JoinedSyncResult]): list of sync
- results for rooms this user is joined to
- time_now(int): current time - used as a baseline for age
- calculations
- token_id(int): ID of the user's auth token - used for namespacing
+ rooms: list of sync results for rooms this user is joined to
+ time_now: current time - used as a baseline for age calculations
+ token_id: ID of the user's auth token - used for namespacing
of transaction IDs
- event_fields(list<str>): List of event fields to include. If empty,
+ event_fields: List of event fields to include. If empty,
all fields will be returned.
- event_formatter (func[dict]): function to convert from federation format
+ event_formatter: function to convert from federation format
to client format
Returns:
- dict[str, dict[str, object]]: the joined rooms list, in our
- response format
+ The joined rooms list, in our response format
"""
joined = {}
for room in rooms:
@@ -331,23 +365,26 @@ class SyncRestServlet(RestServlet):
return joined
- async def encode_invited(self, rooms, time_now, token_id, event_formatter):
+ async def encode_invited(
+ self,
+ rooms: List[InvitedSyncResult],
+ time_now: int,
+ token_id: Optional[int],
+ event_formatter: Callable[[JsonDict], JsonDict],
+ ) -> JsonDict:
"""
Encode the invited rooms in a sync result
Args:
- rooms(list[synapse.handlers.sync.InvitedSyncResult]): list of
- sync results for rooms this user is invited to
- time_now(int): current time - used as a baseline for age
- calculations
- token_id(int): ID of the user's auth token - used for namespacing
+ rooms: list of sync results for rooms this user is invited to
+ time_now: current time - used as a baseline for age calculations
+ token_id: ID of the user's auth token - used for namespacing
of transaction IDs
- event_formatter (func[dict]): function to convert from federation format
+ event_formatter: function to convert from federation format
to client format
Returns:
- dict[str, dict[str, object]]: the invited rooms list, in our
- response format
+ The invited rooms list, in our response format
"""
invited = {}
for room in rooms:
@@ -370,7 +407,7 @@ class SyncRestServlet(RestServlet):
self,
rooms: List[KnockedSyncResult],
time_now: int,
- token_id: int,
+ token_id: Optional[int],
event_formatter: Callable[[Dict], Dict],
) -> Dict[str, Dict[str, Any]]:
"""
@@ -421,25 +458,26 @@ class SyncRestServlet(RestServlet):
return knocked
async def encode_archived(
- self, rooms, time_now, token_id, event_fields, event_formatter
- ):
+ self,
+ rooms: List[ArchivedSyncResult],
+ time_now: int,
+ token_id: Optional[int],
+ event_fields: List[str],
+ event_formatter: Callable[[JsonDict], JsonDict],
+ ) -> JsonDict:
"""
Encode the archived rooms in a sync result
Args:
- rooms (list[synapse.handlers.sync.ArchivedSyncResult]): list of
- sync results for rooms this user is joined to
- time_now(int): current time - used as a baseline for age
- calculations
- token_id(int): ID of the user's auth token - used for namespacing
+ rooms: list of sync results for rooms this user is joined to
+ time_now: current time - used as a baseline for age calculations
+ token_id: ID of the user's auth token - used for namespacing
of transaction IDs
- event_fields(list<str>): List of event fields to include. If empty,
+ event_fields: List of event fields to include. If empty,
all fields will be returned.
- event_formatter (func[dict]): function to convert from federation format
- to client format
+ event_formatter: function to convert from federation format to client format
Returns:
- dict[str, dict[str, object]]: The invited rooms list, in our
- response format
+ The archived rooms list, in our response format
"""
joined = {}
for room in rooms:
@@ -455,26 +493,30 @@ class SyncRestServlet(RestServlet):
return joined
async def encode_room(
- self, room, time_now, token_id, joined, only_fields, event_formatter
- ):
+ self,
+ room: Union[JoinedSyncResult, ArchivedSyncResult],
+ time_now: int,
+ token_id: Optional[int],
+ joined: bool,
+ only_fields: Optional[List[str]],
+ event_formatter: Callable[[JsonDict], JsonDict],
+ ) -> JsonDict:
"""
Args:
- room (JoinedSyncResult|ArchivedSyncResult): sync result for a
- single room
- time_now (int): current time - used as a baseline for age
- calculations
- token_id (int): ID of the user's auth token - used for namespacing
+ room: sync result for a single room
+ time_now: current time - used as a baseline for age calculations
+ token_id: ID of the user's auth token - used for namespacing
of transaction IDs
- joined (bool): True if the user is joined to this room - will mean
+ joined: True if the user is joined to this room - will mean
we handle ephemeral events
- only_fields(list<str>): Optional. The list of event fields to include.
- event_formatter (func[dict]): function to convert from federation format
+ only_fields: Optional. The list of event fields to include.
+ event_formatter: function to convert from federation format
to client format
Returns:
- dict[str, object]: the room, encoded in our response format
+ The room, encoded in our response format
"""
- def serialize(events):
+ def serialize(events: Iterable[EventBase]) -> Awaitable[List[JsonDict]]:
return self._event_serializer.serialize_events(
events,
time_now=time_now,
@@ -507,7 +549,7 @@ class SyncRestServlet(RestServlet):
account_data = room.account_data
- result = {
+ result: JsonDict = {
"timeline": {
"events": serialized_timeline,
"prev_batch": await room.timeline.prev_batch.to_string(self.store),
@@ -518,6 +560,7 @@ class SyncRestServlet(RestServlet):
}
if joined:
+ assert isinstance(room, JoinedSyncResult)
ephemeral_events = room.ephemeral
result["ephemeral"] = {"events": ephemeral_events}
result["unread_notifications"] = room.unread_notifications
@@ -527,5 +570,5 @@ class SyncRestServlet(RestServlet):
return result
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
SyncRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/tags.py
index c14f83be18..c88cb9367c 100644
--- a/synapse/rest/client/v2_alpha/tags.py
+++ b/synapse/rest/client/tags.py
@@ -13,12 +13,19 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import AuthError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -29,12 +36,14 @@ class TagListServlet(RestServlet):
PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/rooms/(?P<room_id>[^/]*)/tags")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
- async def on_GET(self, request, user_id, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, user_id: str, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot get tags for other users.")
@@ -54,12 +63,14 @@ class TagServlet(RestServlet):
"/user/(?P<user_id>[^/]*)/rooms/(?P<room_id>[^/]*)/tags/(?P<tag>[^/]*)"
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.handler = hs.get_account_data_handler()
- async def on_PUT(self, request, user_id, room_id, tag):
+ async def on_PUT(
+ self, request: SynapseRequest, user_id: str, room_id: str, tag: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot add tags for other users.")
@@ -70,7 +81,9 @@ class TagServlet(RestServlet):
return 200, {}
- async def on_DELETE(self, request, user_id, room_id, tag):
+ async def on_DELETE(
+ self, request: SynapseRequest, user_id: str, room_id: str, tag: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot add tags for other users.")
@@ -80,6 +93,6 @@ class TagServlet(RestServlet):
return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
TagListServlet(hs).register(http_server)
TagServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/thirdparty.py
index b5c67c9bb6..b895c73acf 100644
--- a/synapse/rest/client/v2_alpha/thirdparty.py
+++ b/synapse/rest/client/thirdparty.py
@@ -12,27 +12,33 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import logging
+from typing import TYPE_CHECKING, Dict, List, Tuple
from synapse.api.constants import ThirdPartyEntityKind
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
class ThirdPartyProtocolsServlet(RestServlet):
PATTERNS = client_patterns("/thirdparty/protocols")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.appservice_handler = hs.get_application_service_handler()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await self.auth.get_user_by_req(request, allow_guest=True)
protocols = await self.appservice_handler.get_3pe_protocols()
@@ -42,13 +48,15 @@ class ThirdPartyProtocolsServlet(RestServlet):
class ThirdPartyProtocolServlet(RestServlet):
PATTERNS = client_patterns("/thirdparty/protocol/(?P<protocol>[^/]+)$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.appservice_handler = hs.get_application_service_handler()
- async def on_GET(self, request, protocol):
+ async def on_GET(
+ self, request: SynapseRequest, protocol: str
+ ) -> Tuple[int, JsonDict]:
await self.auth.get_user_by_req(request, allow_guest=True)
protocols = await self.appservice_handler.get_3pe_protocols(
@@ -63,16 +71,18 @@ class ThirdPartyProtocolServlet(RestServlet):
class ThirdPartyUserServlet(RestServlet):
PATTERNS = client_patterns("/thirdparty/user(/(?P<protocol>[^/]+))?$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.appservice_handler = hs.get_application_service_handler()
- async def on_GET(self, request, protocol):
+ async def on_GET(
+ self, request: SynapseRequest, protocol: str
+ ) -> Tuple[int, List[JsonDict]]:
await self.auth.get_user_by_req(request, allow_guest=True)
- fields = request.args
+ fields: Dict[bytes, List[bytes]] = request.args # type: ignore[assignment]
fields.pop(b"access_token", None)
results = await self.appservice_handler.query_3pe(
@@ -85,16 +95,18 @@ class ThirdPartyUserServlet(RestServlet):
class ThirdPartyLocationServlet(RestServlet):
PATTERNS = client_patterns("/thirdparty/location(/(?P<protocol>[^/]+))?$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.appservice_handler = hs.get_application_service_handler()
- async def on_GET(self, request, protocol):
+ async def on_GET(
+ self, request: SynapseRequest, protocol: str
+ ) -> Tuple[int, List[JsonDict]]:
await self.auth.get_user_by_req(request, allow_guest=True)
- fields = request.args
+ fields: Dict[bytes, List[bytes]] = request.args # type: ignore[assignment]
fields.pop(b"access_token", None)
results = await self.appservice_handler.query_3pe(
@@ -104,7 +116,7 @@ class ThirdPartyLocationServlet(RestServlet):
return 200, results
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ThirdPartyProtocolsServlet(hs).register(http_server)
ThirdPartyProtocolServlet(hs).register(http_server)
ThirdPartyUserServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/tokenrefresh.py
index b2f858545c..c8c3b25bd3 100644
--- a/synapse/rest/client/v2_alpha/tokenrefresh.py
+++ b/synapse/rest/client/tokenrefresh.py
@@ -12,11 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import TYPE_CHECKING
+
+from twisted.web.server import Request
+
from synapse.api.errors import AuthError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
class TokenRefreshRestServlet(RestServlet):
"""
@@ -26,12 +34,12 @@ class TokenRefreshRestServlet(RestServlet):
PATTERNS = client_patterns("/tokenrefresh")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
- async def on_POST(self, request):
+ async def on_POST(self, request: Request) -> None:
raise AuthError(403, "tokenrefresh is no longer supported.")
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
TokenRefreshRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py
index 94ff3719ce..914fb3acf5 100644
--- a/synapse/rest/client/transactions.py
+++ b/synapse/rest/client/transactions.py
@@ -15,28 +15,37 @@
"""This module contains logic for storing HTTP PUT transactions. This is used
to ensure idempotency when performing PUTs using the REST API."""
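+ # Sketch of the idempotency contract (illustrative): two PUTs to the same
+ # path with the same transaction ID and access token yield the same
+ # (response_code, response_body) pair; the underlying handler runs only once.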
import logging
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Tuple
+
+from twisted.python.failure import Failure
+from twisted.web.server import Request
from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.types import JsonDict
from synapse.util.async_helpers import ObservableDeferred
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
CLEANUP_PERIOD_MS = 1000 * 60 * 30 # 30 mins
class HttpTransactionCache:
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = self.hs.get_auth()
self.clock = self.hs.get_clock()
- self.transactions = {
- # $txn_key: (ObservableDeferred<(res_code, res_json_body)>, timestamp)
- }
+ # $txn_key: (ObservableDeferred<(res_code, res_json_body)>, timestamp)
+ self.transactions: Dict[
+ str, Tuple[ObservableDeferred[Tuple[int, JsonDict]], int]
+ ] = {}
# Try to clean entries every 30 mins. This means entries will exist
# for at *LEAST* 30 mins, and at *MOST* 60 mins.
self.cleaner = self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS)
- def _get_transaction_key(self, request):
+ def _get_transaction_key(self, request: Request) -> str:
"""A helper function which returns a transaction key that can be used
with TransactionCache for idempotent requests.
@@ -45,15 +54,21 @@ class HttpTransactionCache:
path and the access_token for the requesting user.
Args:
- request (twisted.web.http.Request): The incoming request. Must
- contain an access_token.
+ request: The incoming request. Must contain an access_token.
Returns:
- str: A transaction key
+ A transaction key
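+
+ e.g. (illustrative values): a PUT to
+ /_matrix/client/r0/sendToDevice/m.example/txn1 with access token
+ "syt_abc" yields "/_matrix/client/r0/sendToDevice/m.example/txn1/syt_abc".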
"""
+ assert request.path is not None
token = self.auth.get_access_token_from_request(request)
return request.path.decode("utf8") + "/" + token
- def fetch_or_execute_request(self, request, fn, *args, **kwargs):
+ def fetch_or_execute_request(
+ self,
+ request: Request,
+ fn: Callable[..., Awaitable[Tuple[int, JsonDict]]],
+ *args: Any,
+ **kwargs: Any,
+ ) -> Awaitable[Tuple[int, JsonDict]]:
"""A helper function for fetch_or_execute which extracts
a transaction key from the given request.
@@ -64,15 +79,20 @@ class HttpTransactionCache:
self._get_transaction_key(request), fn, *args, **kwargs
)
- def fetch_or_execute(self, txn_key, fn, *args, **kwargs):
+ def fetch_or_execute(
+ self,
+ txn_key: str,
+ fn: Callable[..., Awaitable[Tuple[int, JsonDict]]],
+ *args: Any,
+ **kwargs: Any,
+ ) -> Awaitable[Tuple[int, JsonDict]]:
"""Fetches the response for this transaction, or executes the given function
to produce a response for this transaction.
Args:
- txn_key (str): A key to ensure idempotency should fetch_or_execute be
- called again at a later point in time.
- fn (function): A function which returns a tuple of
- (response_code, response_dict).
+ txn_key: A key to ensure idempotency should fetch_or_execute be
+ called again at a later point in time.
+ fn: A function which returns a tuple of (response_code, response_dict).
*args: Arguments to pass to fn.
**kwargs: Keyword arguments to pass to fn.
Returns:
@@ -90,7 +110,7 @@ class HttpTransactionCache:
# if the request fails with an exception, remove it
# from the transaction map. This is done to ensure that we don't
# cache transient errors like rate-limiting errors, etc.
- def remove_from_map(err):
+ def remove_from_map(err: Failure) -> None:
self.transactions.pop(txn_key, None)
# we deliberately do not propagate the error any further, as we
# expect the observers to have reported it.
@@ -99,7 +119,7 @@ class HttpTransactionCache:
return make_deferred_yieldable(observable.observe())
- def _cleanup(self):
+ def _cleanup(self) -> None:
now = self.clock.time_msec()
for key in list(self.transactions):
ts = self.transactions[key][1]
diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/user_directory.py
index 7e8912f0b9..8852811114 100644
--- a/synapse/rest/client/v2_alpha/user_directory.py
+++ b/synapse/rest/client/user_directory.py
@@ -13,29 +13,32 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
class UserDirectorySearchRestServlet(RestServlet):
PATTERNS = client_patterns("/user_directory/search$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.user_directory_handler = hs.get_user_directory_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
"""Searches for users in directory
Returns:
@@ -75,5 +78,5 @@ class UserDirectorySearchRestServlet(RestServlet):
return 200, results
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
UserDirectorySearchRestServlet(hs).register(http_server)
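
As a usage illustration (not part of the patch), the endpoint this servlet registers is the spec's user directory search. A client call might look like the following; the homeserver URL and access token are placeholders.

```python
import requests

# Placeholders: substitute a real homeserver and access token.
resp = requests.post(
    "https://matrix.example.com/_matrix/client/r0/user_directory/search",
    headers={"Authorization": "Bearer <access_token>"},
    json={"search_term": "alice", "limit": 10},
)
print(resp.json())  # e.g. {"limited": False, "results": [...]}
```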
diff --git a/synapse/rest/client/v1/__init__.py b/synapse/rest/client/v1/__init__.py
deleted file mode 100644
index 5e83dba2ed..0000000000
--- a/synapse/rest/client/v1/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/synapse/rest/client/v2_alpha/__init__.py b/synapse/rest/client/v2_alpha/__init__.py
deleted file mode 100644
index 5e83dba2ed..0000000000
--- a/synapse/rest/client/v2_alpha/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index fa2e4e9cba..a1a815cf82 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -17,9 +17,17 @@
import logging
import re
+from typing import TYPE_CHECKING, Tuple
+
+from twisted.web.server import Request
from synapse.api.constants import RoomCreationPreset
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -27,7 +35,7 @@ logger = logging.getLogger(__name__)
class VersionsRestServlet(RestServlet):
PATTERNS = [re.compile("^/_matrix/client/versions$")]
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.config = hs.config
@@ -45,7 +53,7 @@ class VersionsRestServlet(RestServlet):
in self.config.encryption_enabled_by_default_for_room_presets
)
- def on_GET(self, request):
+ def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
return (
200,
{
@@ -89,5 +97,5 @@ class VersionsRestServlet(RestServlet):
)
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
VersionsRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/voip.py
index c780ffded5..9d46ed3af3 100644
--- a/synapse/rest/client/v1/voip.py
+++ b/synapse/rest/client/voip.py
@@ -15,20 +15,27 @@
import base64
import hashlib
import hmac
+from typing import TYPE_CHECKING, Tuple
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
-from synapse.rest.client.v2_alpha._base import client_patterns
+from synapse.http.site import SynapseRequest
+from synapse.rest.client._base import client_patterns
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
class VoipRestServlet(RestServlet):
PATTERNS = client_patterns("/voip/turnServer$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(
request, self.hs.config.turn_allow_guests
)
@@ -69,5 +76,5 @@ class VoipRestServlet(RestServlet):
)
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
VoipRestServlet(hs).register(http_server)
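
The `base64`/`hashlib`/`hmac` imports above exist because this servlet derives short-lived TURN credentials. A standalone sketch of the conventional TURN shared-secret scheme (the exact response fields Synapse returns are not shown here):

```python
import base64
import hashlib
import hmac
import time

def turn_credentials(user_id: str, shared_secret: str, ttl: int = 86400):
    # Conventional TURN REST scheme: username is "<expiry>:<user>", the
    # password is base64(HMAC-SHA1(shared_secret, username)).
    expiry = int(time.time()) + ttl
    username = f"{expiry}:{user_id}"
    mac = hmac.new(
        shared_secret.encode("ascii"), username.encode("ascii"), hashlib.sha1
    )
    return username, base64.b64encode(mac.digest()).decode("ascii")

print(turn_credentials("@alice:example.com", "s3cret"))
```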
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 4f702f890c..0f5ce41ff8 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -836,7 +836,9 @@ class MediaRepository:
return {"deleted": deleted}
- async def delete_local_media(self, media_id: str) -> Tuple[List[str], int]:
+ async def delete_local_media_ids(
+ self, media_ids: List[str]
+ ) -> Tuple[List[str], int]:
"""
Delete the given local or remote media ID from this server
@@ -845,7 +847,7 @@ class MediaRepository:
Returns:
A tuple of (list of deleted media IDs, total deleted media IDs).
"""
- return await self._remove_local_media_from_disk([media_id])
+ return await self._remove_local_media_from_disk(media_ids)
async def delete_old_local_media(
self,
diff --git a/synapse/rest/media/v1/oembed.py b/synapse/rest/media/v1/oembed.py
new file mode 100644
index 0000000000..afe41823e4
--- /dev/null
+++ b/synapse/rest/media/v1/oembed.py
@@ -0,0 +1,135 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import TYPE_CHECKING, Optional
+
+import attr
+
+from synapse.http.client import SimpleHttpClient
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(slots=True, auto_attribs=True)
+class OEmbedResult:
+ # Either HTML content or URL must be provided.
+ html: Optional[str]
+ url: Optional[str]
+ title: Optional[str]
+ # Number of seconds to cache the content.
+ cache_age: int
+
+
+class OEmbedError(Exception):
+ """An error occurred processing the oEmbed object."""
+
+
+class OEmbedProvider:
+ """
+ A helper for accessing oEmbed content.
+
+ It can be used to check if a URL should be accessed via oEmbed and for
+ requesting/parsing oEmbed content.
+ """
+
+ def __init__(self, hs: "HomeServer", client: SimpleHttpClient):
+ self._oembed_patterns = {}
+ for oembed_endpoint in hs.config.oembed.oembed_patterns:
+ for pattern in oembed_endpoint.url_patterns:
+ self._oembed_patterns[pattern] = oembed_endpoint.api_endpoint
+ self._client = client
+
+ def get_oembed_url(self, url: str) -> Optional[str]:
+ """
+ Check whether the URL should be downloaded as oEmbed content instead.
+
+ Args:
+ url: The URL to check.
+
+ Returns:
+ A URL to use instead or None if the original URL should be used.
+ """
+ for url_pattern, endpoint in self._oembed_patterns.items():
+ if url_pattern.fullmatch(url):
+ return endpoint
+
+ # No match.
+ return None
+
+ async def get_oembed_content(self, endpoint: str, url: str) -> OEmbedResult:
+ """
+ Request content from an oEmbed endpoint.
+
+ Args:
+ endpoint: The oEmbed API endpoint.
+ url: The URL to pass to the API.
+
+ Returns:
+ An object representing the metadata returned.
+
+ Raises:
+ OEmbedError if fetching or parsing of the oEmbed information fails.
+ """
+ try:
+ logger.debug("Trying to get oEmbed content for url '%s'", url)
+ result = await self._client.get_json(
+ endpoint,
+ # TODO Specify max height / width.
+ # Note that only the JSON format is supported.
+ args={"url": url},
+ )
+
+ # Ensure the oEmbed version is 1.0.
+ if result.get("version") != "1.0":
+ raise OEmbedError("Invalid version: %s" % (result.get("version"),))
+
+ oembed_type = result.get("type")
+
+ # Ensure the cache age is None or an int.
+ cache_age = result.get("cache_age")
+ if cache_age:
+ cache_age = int(cache_age)
+
+ oembed_result = OEmbedResult(None, None, result.get("title"), cache_age)
+
+ # HTML content.
+ if oembed_type == "rich":
+ oembed_result.html = result.get("html")
+ return oembed_result
+
+ if oembed_type == "photo":
+ oembed_result.url = result.get("url")
+ return oembed_result
+
+ # TODO Handle link and video types.
+
+ if "thumbnail_url" in result:
+ oembed_result.url = result.get("thumbnail_url")
+ return oembed_result
+
+ raise OEmbedError("Incompatible oEmbed information.")
+
+ except OEmbedError as e:
+ # Trap OEmbedErrors first so we can directly re-raise them.
+ logger.warning("Error parsing oEmbed metadata from %s: %r", url, e)
+ raise
+
+ except Exception as e:
+ # Trap any exception and let the code follow as usual.
+ # FIXME: pass through 404s and other error messages nicely
+ logger.warning("Error downloading oEmbed metadata from %s: %r", url, e)
+ raise OEmbedError() from e
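
To show how the two entry points of the new provider fit together, here is a hedged sketch of a caller; the wiring in `preview_url_resource.py` below is the real consumer, this is illustration only.

```python
async def preview_via_oembed(provider: OEmbedProvider, url: str):
    # Ask whether this URL is covered by a configured oEmbed endpoint.
    endpoint = provider.get_oembed_url(url)
    if endpoint is None:
        return None  # caller should download the URL directly
    try:
        result = await provider.get_oembed_content(endpoint, url)
    except OEmbedError:
        return None  # fall back to a direct download
    # Exactly one of result.html / result.url is expected to be usable.
    return result
```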
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 0f051d4041..317d333b12 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -25,8 +25,6 @@ import traceback
from typing import TYPE_CHECKING, Any, Dict, Generator, Iterable, Optional, Union
from urllib import parse as urlparse
-import attr
-
from twisted.internet.error import DNSLookupError
from twisted.web.server import Request
@@ -43,6 +41,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.media.v1._base import get_filename_from_headers
from synapse.rest.media.v1.media_storage import MediaStorage
+from synapse.rest.media.v1.oembed import OEmbedError, OEmbedProvider
from synapse.util import json_encoder
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.expiringcache import ExpiringCache
@@ -71,63 +70,6 @@ OG_TAG_VALUE_MAXLEN = 1000
ONE_HOUR = 60 * 60 * 1000
-# A map of globs to API endpoints.
-_oembed_globs = {
- # Twitter.
- "https://publish.twitter.com/oembed": [
- "https://twitter.com/*/status/*",
- "https://*.twitter.com/*/status/*",
- "https://twitter.com/*/moments/*",
- "https://*.twitter.com/*/moments/*",
- # Include the HTTP versions too.
- "http://twitter.com/*/status/*",
- "http://*.twitter.com/*/status/*",
- "http://twitter.com/*/moments/*",
- "http://*.twitter.com/*/moments/*",
- ],
-}
-# Convert the globs to regular expressions.
-_oembed_patterns = {}
-for endpoint, globs in _oembed_globs.items():
- for glob in globs:
- # Convert the glob into a sane regular expression to match against. The
- # rules followed will be slightly different for the domain portion vs.
- # the rest.
- #
- # 1. The scheme must be one of HTTP / HTTPS (and have no globs).
- # 2. The domain can have globs, but we limit it to characters that can
- # reasonably be a domain part.
- # TODO: This does not attempt to handle Unicode domain names.
- # 3. Other parts allow a glob to be any one, or more, characters.
- results = urlparse.urlparse(glob)
-
- # Ensure the scheme does not have wildcards (and is a sane scheme).
- if results.scheme not in {"http", "https"}:
- raise ValueError("Insecure oEmbed glob scheme: %s" % (results.scheme,))
-
- pattern = urlparse.urlunparse(
- [
- results.scheme,
- re.escape(results.netloc).replace("\\*", "[a-zA-Z0-9_-]+"),
- ]
- + [re.escape(part).replace("\\*", ".+") for part in results[2:]]
- )
- _oembed_patterns[re.compile(pattern)] = endpoint
-
-
-@attr.s(slots=True)
-class OEmbedResult:
- # Either HTML content or URL must be provided.
- html = attr.ib(type=Optional[str])
- url = attr.ib(type=Optional[str])
- title = attr.ib(type=Optional[str])
- # Number of seconds to cache the content.
- cache_age = attr.ib(type=int)
-
-
-class OEmbedError(Exception):
- """An error occurred processing the oEmbed object."""
-
class PreviewUrlResource(DirectServeJsonResource):
isLeaf = True
@@ -157,6 +99,8 @@ class PreviewUrlResource(DirectServeJsonResource):
self.primary_base_path = media_repo.primary_base_path
self.media_storage = media_storage
+ self._oembed = OEmbedProvider(hs, self.client)
+
# We run the background jobs if we're the instance specified (or no
# instance is specified, where we assume there is only one instance
# serving media).
@@ -367,87 +311,6 @@ class PreviewUrlResource(DirectServeJsonResource):
return jsonog.encode("utf8")
- def _get_oembed_url(self, url: str) -> Optional[str]:
- """
- Check whether the URL should be downloaded as oEmbed content instead.
-
- Args:
- url: The URL to check.
-
- Returns:
- A URL to use instead or None if the original URL should be used.
- """
- for url_pattern, endpoint in _oembed_patterns.items():
- if url_pattern.fullmatch(url):
- return endpoint
-
- # No match.
- return None
-
- async def _get_oembed_content(self, endpoint: str, url: str) -> OEmbedResult:
- """
- Request content from an oEmbed endpoint.
-
- Args:
- endpoint: The oEmbed API endpoint.
- url: The URL to pass to the API.
-
- Returns:
- An object representing the metadata returned.
-
- Raises:
- OEmbedError if fetching or parsing of the oEmbed information fails.
- """
- try:
- logger.debug("Trying to get oEmbed content for url '%s'", url)
- result = await self.client.get_json(
- endpoint,
- # TODO Specify max height / width.
- # Note that only the JSON format is supported.
- args={"url": url},
- )
-
- # Ensure there's a version of 1.0.
- if result.get("version") != "1.0":
- raise OEmbedError("Invalid version: %s" % (result.get("version"),))
-
- oembed_type = result.get("type")
-
- # Ensure the cache age is None or an int.
- cache_age = result.get("cache_age")
- if cache_age:
- cache_age = int(cache_age)
-
- oembed_result = OEmbedResult(None, None, result.get("title"), cache_age)
-
- # HTML content.
- if oembed_type == "rich":
- oembed_result.html = result.get("html")
- return oembed_result
-
- if oembed_type == "photo":
- oembed_result.url = result.get("url")
- return oembed_result
-
- # TODO Handle link and video types.
-
- if "thumbnail_url" in result:
- oembed_result.url = result.get("thumbnail_url")
- return oembed_result
-
- raise OEmbedError("Incompatible oEmbed information.")
-
- except OEmbedError as e:
- # Trap OEmbedErrors first so we can directly re-raise them.
- logger.warning("Error parsing oEmbed metadata from %s: %r", url, e)
- raise
-
- except Exception as e:
- # Trap any exception and let the code follow as usual.
- # FIXME: pass through 404s and other error messages nicely
- logger.warning("Error downloading oEmbed metadata from %s: %r", url, e)
- raise OEmbedError() from e
-
async def _download_url(self, url: str, user: str) -> Dict[str, Any]:
# TODO: we should probably honour robots.txt... except in practice
# we're most likely being explicitly triggered by a human rather than a
@@ -459,11 +322,11 @@ class PreviewUrlResource(DirectServeJsonResource):
# If this URL can be accessed via oEmbed, use that instead.
url_to_download: Optional[str] = url
- oembed_url = self._get_oembed_url(url)
+ oembed_url = self._oembed.get_oembed_url(url)
if oembed_url:
# The result might be a new URL to download, or it might be HTML content.
try:
- oembed_result = await self._get_oembed_content(oembed_url, url)
+ oembed_result = await self._oembed.get_oembed_content(oembed_url, url)
if oembed_result.url:
url_to_download = oembed_result.url
elif oembed_result.html:
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index a029d426f0..12bd745cb2 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -15,7 +15,7 @@
import logging
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from twisted.web.server import Request
@@ -414,9 +414,9 @@ class ThumbnailResource(DirectServeJsonResource):
if desired_method == "crop":
# Thumbnails that match equal or larger sizes of desired width/height.
- crop_info_list = []
+ crop_info_list: List[Tuple[int, int, int, bool, int, Dict[str, Any]]] = []
# Other thumbnails.
- crop_info_list2 = []
+ crop_info_list2: List[Tuple[int, int, int, bool, int, Dict[str, Any]]] = []
for info in thumbnail_infos:
# Skip thumbnails generated with different methods.
if info["thumbnail_method"] != "crop":
@@ -451,15 +451,19 @@ class ThumbnailResource(DirectServeJsonResource):
info,
)
)
+ # Pick the most appropriate thumbnail. Some values of `desired_width` and
+ # `desired_height` may result in a tie, in which case we avoid comparing on
+ # the thumbnail info dictionary and pick the thumbnail that appears earlier
+ # in the list of candidates.
if crop_info_list:
- thumbnail_info = min(crop_info_list)[-1]
+ thumbnail_info = min(crop_info_list, key=lambda t: t[:-1])[-1]
elif crop_info_list2:
- thumbnail_info = min(crop_info_list2)[-1]
+ thumbnail_info = min(crop_info_list2, key=lambda t: t[:-1])[-1]
elif desired_method == "scale":
# Thumbnails that match equal or larger sizes of desired width/height.
- info_list = []
+ info_list: List[Tuple[int, bool, int, Dict[str, Any]]] = []
# Other thumbnails.
- info_list2 = []
+ info_list2: List[Tuple[int, bool, int, Dict[str, Any]]] = []
for info in thumbnail_infos:
# Skip thumbnails generated with different methods.
@@ -477,10 +481,14 @@ class ThumbnailResource(DirectServeJsonResource):
info_list2.append(
(size_quality, type_quality, length_quality, info)
)
+ # Pick the most appropriate thumbnail. Some values of `desired_width` and
+ # `desired_height` may result in a tie, in which case we avoid comparing on
+ # the thumbnail info dictionary and pick the thumbnail that appears earlier
+ # in the list of candidates.
if info_list:
- thumbnail_info = min(info_list)[-1]
+ thumbnail_info = min(info_list, key=lambda t: t[:-1])[-1]
elif info_list2:
- thumbnail_info = min(info_list2)[-1]
+ thumbnail_info = min(info_list2, key=lambda t: t[:-1])[-1]
if thumbnail_info:
return FileInfo(
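
The `key=lambda t: t[:-1]` change is easy to motivate with a tiny standalone example: when the numeric fields tie, a bare `min()` falls through to comparing the trailing info dicts, which raises on Python 3.

```python
candidates = [
    (640, 480, 1, True, 1000, {"thumbnail_method": "crop"}),
    (640, 480, 1, True, 1000, {"thumbnail_method": "crop", "len": 2}),
]

# Safe: the dict in the last position is never compared.
best = min(candidates, key=lambda t: t[:-1])[-1]

# Unsafe: min(candidates) would compare the two dicts on a tie and raise
# TypeError: '<' not supported between instances of 'dict' and 'dict'.
```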
diff --git a/synapse/rest/synapse/client/new_user_consent.py b/synapse/rest/synapse/client/new_user_consent.py
index 488b97b32e..fc62a09b7f 100644
--- a/synapse/rest/synapse/client/new_user_consent.py
+++ b/synapse/rest/synapse/client/new_user_consent.py
@@ -46,6 +46,8 @@ class NewUserConsentResource(DirectServeHtmlResource):
self._consent_version = hs.config.consent.user_consent_version
def template_search_dirs():
+ if hs.config.server.custom_template_directory:
+ yield hs.config.server.custom_template_directory
if hs.config.sso.sso_template_dir:
yield hs.config.sso.sso_template_dir
yield hs.config.sso.default_template_dir
diff --git a/synapse/rest/synapse/client/pick_username.py b/synapse/rest/synapse/client/pick_username.py
index ab24ec0a8e..c15b83c387 100644
--- a/synapse/rest/synapse/client/pick_username.py
+++ b/synapse/rest/synapse/client/pick_username.py
@@ -74,6 +74,8 @@ class AccountDetailsResource(DirectServeHtmlResource):
self._sso_handler = hs.get_sso_handler()
def template_search_dirs():
+ if hs.config.server.custom_template_directory:
+ yield hs.config.server.custom_template_directory
if hs.config.sso.sso_template_dir:
yield hs.config.sso.sso_template_dir
yield hs.config.sso.default_template_dir
diff --git a/synapse/server.py b/synapse/server.py
index 095dba9ad0..5adeeff61a 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -34,8 +34,6 @@ from typing import (
)
import twisted.internet.tcp
-from twisted.internet import defer
-from twisted.mail.smtp import sendmail
from twisted.web.iweb import IPolicyForHTTPS
from twisted.web.resource import IResource
@@ -78,6 +76,7 @@ from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler
from synapse.handlers.event_auth import EventAuthHandler
from synapse.handlers.events import EventHandler, EventStreamHandler
from synapse.handlers.federation import FederationHandler
+from synapse.handlers.federation_event import FederationEventHandler
from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerHandler
from synapse.handlers.identity import IdentityHandler
from synapse.handlers.initial_sync import InitialSyncHandler
@@ -101,10 +100,10 @@ from synapse.handlers.room import (
from synapse.handlers.room_list import RoomListHandler
from synapse.handlers.room_member import RoomMemberHandler, RoomMemberMasterHandler
from synapse.handlers.room_member_worker import RoomMemberWorkerHandler
+from synapse.handlers.room_summary import RoomSummaryHandler
from synapse.handlers.search import SearchHandler
from synapse.handlers.send_email import SendEmailHandler
from synapse.handlers.set_password import SetPasswordHandler
-from synapse.handlers.space_summary import SpaceSummaryHandler
from synapse.handlers.sso import SsoHandler
from synapse.handlers.stats import StatsHandler
from synapse.handlers.sync import SyncHandler
@@ -443,10 +442,6 @@ class HomeServer(metaclass=abc.ABCMeta):
return RoomShutdownHandler(self)
@cache_in_self
- def get_sendmail(self) -> Callable[..., defer.Deferred]:
- return sendmail
-
- @cache_in_self
def get_state_handler(self) -> StateHandler:
return StateHandler(self)
@@ -553,6 +548,10 @@ class HomeServer(metaclass=abc.ABCMeta):
return FederationHandler(self)
@cache_in_self
+ def get_federation_event_handler(self) -> FederationEventHandler:
+ return FederationEventHandler(self)
+
+ @cache_in_self
def get_identity_handler(self) -> IdentityHandler:
return IdentityHandler(self)
@@ -778,8 +777,8 @@ class HomeServer(metaclass=abc.ABCMeta):
return AccountDataHandler(self)
@cache_in_self
- def get_space_summary_handler(self) -> SpaceSummaryHandler:
- return SpaceSummaryHandler(self)
+ def get_room_summary_handler(self) -> RoomSummaryHandler:
+ return RoomSummaryHandler(self)
@cache_in_self
def get_event_auth_handler(self) -> EventAuthHandler:
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index f19075b760..d87a538917 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -12,26 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import Optional
+from typing import TYPE_CHECKING, Optional
from synapse.api.constants import EventTypes, Membership, RoomCreationPreset
from synapse.events import EventBase
from synapse.types import UserID, create_requester
from synapse.util.caches.descriptors import cached
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
SERVER_NOTICE_ROOM_TAG = "m.server_notice"
class ServerNoticesManager:
- def __init__(self, hs):
- """
-
- Args:
- hs (synapse.server.HomeServer):
- """
-
+ def __init__(self, hs: "HomeServer"):
self._store = hs.get_datastore()
self._config = hs.config
self._account_data_handler = hs.get_account_data_handler()
@@ -58,6 +55,7 @@ class ServerNoticesManager:
event_content: dict,
type: str = EventTypes.Message,
state_key: Optional[str] = None,
+ txn_id: Optional[str] = None,
) -> EventBase:
"""Send a notice to the given user
@@ -68,6 +66,7 @@ class ServerNoticesManager:
event_content: content of event to send
type: type of event
is_state_event: Is the event a state event
+ txn_id: The transaction ID.
"""
room_id = await self.get_or_create_notice_room_for_user(user_id)
await self.maybe_invite_user_to_room(user_id, room_id)
@@ -90,7 +89,7 @@ class ServerNoticesManager:
event_dict["state_key"] = state_key
event, _ = await self._event_creation_handler.create_and_send_nonmember_event(
- requester, event_dict, ratelimit=False
+ requester, event_dict, ratelimit=False, txn_id=txn_id
)
return event
diff --git a/synapse/static/client/register/style.css b/synapse/static/client/register/style.css
index 5a7b6eebf2..8a39b5d0f5 100644
--- a/synapse/static/client/register/style.css
+++ b/synapse/static/client/register/style.css
@@ -57,4 +57,8 @@ textarea, input {
background-color: #f8f8f8;
border: 1px #ccc solid;
-}
\ No newline at end of file
+}
+
+.error {
+ color: red;
+}
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index c8015a3848..0084d9f96c 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -280,18 +280,18 @@ class LoggingTransaction:
else:
self.executemany(sql, args)
- def execute_values(self, sql: str, *args: Any) -> List[Tuple]:
+ def execute_values(self, sql: str, *args: Any, fetch: bool = True) -> List[Tuple]:
"""Corresponds to psycopg2.extras.execute_values. Only available when
using postgres.
- Always sets fetch=True when caling `execute_values`, so will return the
- results.
+ The `fetch` parameter must be set to False if the query does not return
+ rows (e.g. INSERTs).
"""
assert isinstance(self.database_engine, PostgresEngine)
from psycopg2.extras import execute_values # type: ignore
return self._do_execute(
- lambda *x: execute_values(self.txn, *x, fetch=True), sql, *args
+ lambda *x: execute_values(self.txn, *x, fetch=fetch), sql, *args
)
def execute(self, sql: str, *args: Any) -> None:
@@ -920,13 +920,23 @@ class DatabasePool:
if k != keys[0]:
raise RuntimeError("All items must have the same keys")
- sql = "INSERT INTO %s (%s) VALUES(%s)" % (
- table,
- ", ".join(k for k in keys[0]),
- ", ".join("?" for _ in keys[0]),
- )
+ if isinstance(txn.database_engine, PostgresEngine):
+ # We use `execute_values` as it can be a lot faster than `execute_batch`,
+ # but it's only available on postgres.
+ sql = "INSERT INTO %s (%s) VALUES ?" % (
+ table,
+ ", ".join(k for k in keys[0]),
+ )
+
+ txn.execute_values(sql, vals, fetch=False)
+ else:
+ sql = "INSERT INTO %s (%s) VALUES(%s)" % (
+ table,
+ ", ".join(k for k in keys[0]),
+ ", ".join("?" for _ in keys[0]),
+ )
- txn.execute_batch(sql, vals)
+ txn.execute_batch(sql, vals)
async def simple_upsert(
self,
@@ -941,13 +951,13 @@ class DatabasePool:
`lock` should generally be set to True (the default), but can be set
to False if either of the following are true:
-
- * there is a UNIQUE INDEX on the key columns. In this case a conflict
- will cause an IntegrityError in which case this function will retry
- the update.
-
- * we somehow know that we are the only thread which will be updating
- this table.
+ 1. there is a UNIQUE INDEX on the key columns. In this case a conflict
+ will cause an IntegrityError in which case this function will retry
+ the update.
+ 2. we somehow know that we are the only thread which will be updating
+ this table.
+ As an additional note, this parameter only matters for old SQLite versions
+ because we will use native upserts otherwise.
Args:
table: The table to upsert into
@@ -1281,20 +1291,33 @@ class DatabasePool:
k + "=EXCLUDED." + k for k in value_names
)
- sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s" % (
- table,
- ", ".join(k for k in allnames),
- ", ".join("?" for _ in allnames),
- ", ".join(key_names),
- latter,
- )
-
args = []
for x, y in zip(key_values, value_values):
args.append(tuple(x) + tuple(y))
- return txn.execute_batch(sql, args)
+ if isinstance(txn.database_engine, PostgresEngine):
+ # We use `execute_values` as it can be a lot faster than `execute_batch`,
+ # but it's only available on postgres.
+ sql = "INSERT INTO %s (%s) VALUES ? ON CONFLICT (%s) DO %s" % (
+ table,
+ ", ".join(k for k in allnames),
+ ", ".join(key_names),
+ latter,
+ )
+
+ txn.execute_values(sql, args, fetch=False)
+
+ else:
+ sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s" % (
+ table,
+ ", ".join(k for k in allnames),
+ ", ".join("?" for _ in allnames),
+ ", ".join(key_names),
+ latter,
+ )
+
+ return txn.execute_batch(sql, args)
@overload
async def simple_select_one(
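
For readers unfamiliar with the postgres fast path: `psycopg2.extras.execute_values` expands a single VALUES placeholder into one multi-row statement. A minimal sketch outside Synapse, where the placeholder is written `%s` rather than the `?` that Synapse's paramstyle conversion uses, and the DSN is a placeholder:

```python
import psycopg2
from psycopg2.extras import execute_values

conn = psycopg2.connect("dbname=test")  # placeholder DSN
rows = [(1, "x"), (2, "y"), (3, "z")]
with conn.cursor() as cur:
    # One INSERT with an expanded VALUES list; typically much faster than
    # executemany()/execute_batch() for bulk inserts.
    execute_values(cur, "INSERT INTO t (a, b) VALUES %s", rows, fetch=False)
```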
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index 8d9f07111d..00a644e8f7 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -63,6 +63,7 @@ from .relations import RelationsStore
from .room import RoomStore
from .roommember import RoomMemberStore
from .search import SearchStore
+from .session import SessionStore
from .signatures import SignatureStore
from .state import StateStore
from .stats import StatsStore
@@ -121,15 +122,13 @@ class DataStore(
ServerMetricsStore,
EventForwardExtremitiesStore,
LockStore,
+ SessionStore,
):
def __init__(self, database: DatabasePool, db_conn, hs):
self.hs = hs
self._clock = hs.get_clock()
self.database_engine = database.engine
- self._public_room_id_gen = StreamIdGenerator(
- db_conn, "public_room_list_stream", "stream_id"
- )
self._device_list_id_gen = StreamIdGenerator(
db_conn,
"device_lists_stream",
@@ -170,6 +169,7 @@ class DataStore(
sequence_name="cache_invalidation_stream_seq",
writers=[],
)
+
else:
self._cache_id_gen = None
diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py
index 86075bc55b..6daf8b8ffb 100644
--- a/synapse/storage/databases/main/directory.py
+++ b/synapse/storage/databases/main/directory.py
@@ -75,8 +75,6 @@ class DirectoryWorkerStore(SQLBaseStore):
desc="get_aliases_for_room",
)
-
-class DirectoryStore(DirectoryWorkerStore):
async def create_room_alias_association(
self,
room_alias: RoomAlias,
@@ -126,6 +124,8 @@ class DirectoryStore(DirectoryWorkerStore):
409, "Room alias %s already exists" % room_alias.to_string()
)
+
+class DirectoryStore(DirectoryWorkerStore):
async def delete_room_alias(self, room_alias: RoomAlias) -> str:
room_id = await self.db_pool.runInteraction(
"delete_room_alias", self._delete_room_alias_txn, room_alias
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 1edc96042b..1f0a39eac4 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -755,81 +755,145 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore):
"""
@trace
- def _claim_e2e_one_time_keys(txn):
- sql = (
- "SELECT key_id, key_json FROM e2e_one_time_keys_json"
- " WHERE user_id = ? AND device_id = ? AND algorithm = ?"
- " LIMIT 1"
+ def _claim_e2e_one_time_key_simple(
+ txn, user_id: str, device_id: str, algorithm: str
+ ) -> Optional[Tuple[str, str]]:
+ """Claim OTK for device for DBs that don't support RETURNING.
+
+ Returns:
+ A tuple of key name (algorithm + key ID) and key JSON, if an
+ OTK was found.
+ """
+
+ sql = """
+ SELECT key_id, key_json FROM e2e_one_time_keys_json
+ WHERE user_id = ? AND device_id = ? AND algorithm = ?
+ LIMIT 1
+ """
+
+ txn.execute(sql, (user_id, device_id, algorithm))
+ otk_row = txn.fetchone()
+ if otk_row is None:
+ return None
+
+ key_id, key_json = otk_row
+
+ self.db_pool.simple_delete_one_txn(
+ txn,
+ table="e2e_one_time_keys_json",
+ keyvalues={
+ "user_id": user_id,
+ "device_id": device_id,
+ "algorithm": algorithm,
+ "key_id": key_id,
+ },
)
- fallback_sql = (
- "SELECT key_id, key_json, used FROM e2e_fallback_keys_json"
- " WHERE user_id = ? AND device_id = ? AND algorithm = ?"
- " LIMIT 1"
+ self._invalidate_cache_and_stream(
+ txn, self.count_e2e_one_time_keys, (user_id, device_id)
)
- result = {}
- delete = []
- used_fallbacks = []
- for user_id, device_id, algorithm in query_list:
- user_result = result.setdefault(user_id, {})
- device_result = user_result.setdefault(device_id, {})
- txn.execute(sql, (user_id, device_id, algorithm))
- otk_row = txn.fetchone()
- if otk_row is not None:
- key_id, key_json = otk_row
- device_result[algorithm + ":" + key_id] = key_json
- delete.append((user_id, device_id, algorithm, key_id))
- else:
- # no one-time key available, so see if there's a fallback
- # key
- txn.execute(fallback_sql, (user_id, device_id, algorithm))
- fallback_row = txn.fetchone()
- if fallback_row is not None:
- key_id, key_json, used = fallback_row
- device_result[algorithm + ":" + key_id] = key_json
- if not used:
- used_fallbacks.append(
- (user_id, device_id, algorithm, key_id)
- )
-
- # drop any one-time keys that were claimed
- sql = (
- "DELETE FROM e2e_one_time_keys_json"
- " WHERE user_id = ? AND device_id = ? AND algorithm = ?"
- " AND key_id = ?"
+
+ return f"{algorithm}:{key_id}", key_json
+
+ @trace
+ def _claim_e2e_one_time_key_returning(
+ txn, user_id: str, device_id: str, algorithm: str
+ ) -> Optional[Tuple[str, str]]:
+ """Claim OTK for device for DBs that support RETURNING.
+
+ Returns:
+ A tuple of key name (algorithm + key ID) and key JSON, if an
+ OTK was found.
+ """
+
+ # We can use RETURNING to do the fetch and DELETE in one step.
+ sql = """
+ DELETE FROM e2e_one_time_keys_json
+ WHERE user_id = ? AND device_id = ? AND algorithm = ?
+ AND key_id IN (
+ SELECT key_id FROM e2e_one_time_keys_json
+ WHERE user_id = ? AND device_id = ? AND algorithm = ?
+ LIMIT 1
+ )
+ RETURNING key_id, key_json
+ """
+
+ txn.execute(
+ sql, (user_id, device_id, algorithm, user_id, device_id, algorithm)
)
- for user_id, device_id, algorithm, key_id in delete:
- log_kv(
- {
- "message": "Executing claim e2e_one_time_keys transaction on database."
- }
- )
- txn.execute(sql, (user_id, device_id, algorithm, key_id))
- log_kv({"message": "finished executing and invalidating cache"})
- self._invalidate_cache_and_stream(
- txn, self.count_e2e_one_time_keys, (user_id, device_id)
+ otk_row = txn.fetchone()
+ if otk_row is None:
+ return None
+
+ key_id, key_json = otk_row
+ return f"{algorithm}:{key_id}", key_json
+
+ results = {}
+ for user_id, device_id, algorithm in query_list:
+ if self.database_engine.supports_returning:
+ # If we support RETURNING clause we can use a single query that
+ # allows us to use autocommit mode.
+ _claim_e2e_one_time_key = _claim_e2e_one_time_key_returning
+ db_autocommit = True
+ else:
+ _claim_e2e_one_time_key = _claim_e2e_one_time_key_simple
+ db_autocommit = False
+
+ row = await self.db_pool.runInteraction(
+ "claim_e2e_one_time_keys",
+ _claim_e2e_one_time_key,
+ user_id,
+ device_id,
+ algorithm,
+ db_autocommit=db_autocommit,
+ )
+ if row:
+ device_results = results.setdefault(user_id, {}).setdefault(
+ device_id, {}
)
- # mark fallback keys as used
- for user_id, device_id, algorithm, key_id in used_fallbacks:
- self.db_pool.simple_update_txn(
- txn,
- "e2e_fallback_keys_json",
- {
+ device_results[row[0]] = row[1]
+ continue
+
+ # No one-time key available, so see if there's a fallback
+ # key
+ row = await self.db_pool.simple_select_one(
+ table="e2e_fallback_keys_json",
+ keyvalues={
+ "user_id": user_id,
+ "device_id": device_id,
+ "algorithm": algorithm,
+ },
+ retcols=("key_id", "key_json", "used"),
+ desc="_get_fallback_key",
+ allow_none=True,
+ )
+ if row is None:
+ continue
+
+ key_id = row["key_id"]
+ key_json = row["key_json"]
+ used = row["used"]
+
+ # Mark fallback key as used if not already.
+ if not used:
+ await self.db_pool.simple_update_one(
+ table="e2e_fallback_keys_json",
+ keyvalues={
"user_id": user_id,
"device_id": device_id,
"algorithm": algorithm,
"key_id": key_id,
},
- {"used": True},
+ updatevalues={"used": True},
+ desc="_get_fallback_key_set_used",
)
- self._invalidate_cache_and_stream(
- txn, self.get_e2e_unused_fallback_key_types, (user_id, device_id)
+ await self.invalidate_cache_and_stream(
+ "get_e2e_unused_fallback_key_types", (user_id, device_id)
)
- return result
+ device_results = results.setdefault(user_id, {}).setdefault(device_id, {})
+ device_results[f"{algorithm}:{key_id}"] = key_json
- return await self.db_pool.runInteraction(
- "claim_e2e_one_time_keys", _claim_e2e_one_time_keys
- )
+ return results
class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
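
The RETURNING branch above can be demonstrated standalone. A minimal sqlite3 reproduction of the claim query (RETURNING needs SQLite 3.35+; the schema here is trimmed to the columns the query touches):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE e2e_one_time_keys_json "
    "(user_id TEXT, device_id TEXT, algorithm TEXT, key_id TEXT, key_json TEXT)"
)
conn.execute(
    "INSERT INTO e2e_one_time_keys_json "
    "VALUES ('@u:hs', 'DEV', 'signed_curve25519', 'AAAA', '{}')"
)
# Fetch-and-delete in a single statement, as in the patch above.
row = conn.execute(
    """
    DELETE FROM e2e_one_time_keys_json
    WHERE user_id = ? AND device_id = ? AND algorithm = ?
    AND key_id IN (
        SELECT key_id FROM e2e_one_time_keys_json
        WHERE user_id = ? AND device_id = ? AND algorithm = ?
        LIMIT 1
    )
    RETURNING key_id, key_json
    """,
    ("@u:hs", "DEV", "signed_curve25519") * 2,
).fetchone()
print(row)  # ('AAAA', '{}'), and the row is now gone
```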
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index 547e43ab98..bddf5ef192 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -16,11 +16,11 @@ import logging
from queue import Empty, PriorityQueue
from typing import Collection, Dict, Iterable, List, Optional, Set, Tuple
-from prometheus_client import Gauge
+from prometheus_client import Counter, Gauge
from synapse.api.constants import MAX_DEPTH
from synapse.api.errors import StoreError
-from synapse.api.room_versions import RoomVersion
+from synapse.api.room_versions import EventFormatVersions, RoomVersion
from synapse.events import EventBase, make_event_from_dict
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
@@ -44,6 +44,12 @@ number_pdus_in_federation_queue = Gauge(
"The total number of events in the inbound federation staging",
)
+pdus_pruned_from_federation_queue = Counter(
+ "synapse_federation_server_number_inbound_pdu_pruned",
+ "The number of events in the inbound federation staging that have been "
+ "pruned due to the queue getting too long",
+)
+
logger = logging.getLogger(__name__)
@@ -665,27 +671,97 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
# Return all events where not all sets can reach them.
return {eid for eid, n in event_to_missing_sets.items() if n}
- async def get_oldest_events_with_depth_in_room(self, room_id):
+ async def get_oldest_event_ids_with_depth_in_room(self, room_id) -> Dict[str, int]:
+ """Gets the oldest events(backwards extremities) in the room along with the
+ aproximate depth.
+
+ We use this function so that we can compare and see if someones current
+ depth at their current scrollback is within pagination range of the
+ event extremeties. If the current depth is close to the depth of given
+ oldest event, we can trigger a backfill.
+
+ Args:
+ room_id: Room where we want to find the oldest events
+
+ Returns:
+ Map from event_id to depth
+ """
+
+ def get_oldest_event_ids_with_depth_in_room_txn(txn, room_id):
+ # Assemble a dictionary with event_id -> depth for the oldest events
+ # we know of in the room. Backwards extremities are the oldest
+ # events we know of in the room, but we only know of them because
+ # some other event referenced them via its prev_events, and they aren't
+ # persisted in our database yet (meaning we don't know their depth
+ # specifically). So we need to look for the approximate depth from
+ # the events connected to the current backwards extremities.
+ sql = """
+ SELECT b.event_id, MAX(e.depth) FROM events as e
+ /**
+ * Get the edge connections from the event_edges table
+ * so we can see whether this event's prev_events points
+ * to a backward extremity in the next join.
+ */
+ INNER JOIN event_edges as g
+ ON g.event_id = e.event_id
+ /**
+ * We find the "oldest" events in the room by looking for
+ * events connected to backwards extremities (oldest events
+ * in the room that we know of so far).
+ */
+ INNER JOIN event_backward_extremities as b
+ ON g.prev_event_id = b.event_id
+ WHERE b.room_id = ? AND g.is_state is ?
+ GROUP BY b.event_id
+ """
+
+ txn.execute(sql, (room_id, False))
+
+ return dict(txn)
+
return await self.db_pool.runInteraction(
- "get_oldest_events_with_depth_in_room",
- self.get_oldest_events_with_depth_in_room_txn,
+ "get_oldest_event_ids_with_depth_in_room",
+ get_oldest_event_ids_with_depth_in_room_txn,
room_id,
)
- def get_oldest_events_with_depth_in_room_txn(self, txn, room_id):
- sql = (
- "SELECT b.event_id, MAX(e.depth) FROM events as e"
- " INNER JOIN event_edges as g"
- " ON g.event_id = e.event_id"
- " INNER JOIN event_backward_extremities as b"
- " ON g.prev_event_id = b.event_id"
- " WHERE b.room_id = ? AND g.is_state is ?"
- " GROUP BY b.event_id"
- )
+ async def get_insertion_event_backwards_extremities_in_room(
+ self, room_id
+ ) -> Dict[str, int]:
+ """Get the insertion events we know about that we haven't backfilled yet.
- txn.execute(sql, (room_id, False))
+ We use this function so that we can compare and see if someone's current
+ depth at their current scrollback is within pagination range of the
+ insertion event. If the current depth is close to the depth of a given
+ insertion event, we can trigger a backfill.
- return dict(txn)
+ Args:
+ room_id: Room where we want to find the oldest events
+
+ Returns:
+ Map from event_id to depth
+ """
+
+ def get_insertion_event_backwards_extremities_in_room_txn(txn, room_id):
+ sql = """
+ SELECT b.event_id, MAX(e.depth) FROM insertion_events as i
+ /* We only want insertion events that are also marked as backwards extremities */
+ INNER JOIN insertion_event_extremities as b USING (event_id)
+ /* Get the depth of the insertion event from the events table */
+ INNER JOIN events AS e USING (event_id)
+ WHERE b.room_id = ?
+ GROUP BY b.event_id
+ """
+
+ txn.execute(sql, (room_id,))
+
+ return dict(txn)
+
+ return await self.db_pool.runInteraction(
+ "get_insertion_event_backwards_extremities_in_room",
+ get_insertion_event_backwards_extremities_in_room_txn,
+ room_id,
+ )
async def get_max_depth_of(self, event_ids: List[str]) -> Tuple[str, int]:
"""Returns the event ID and depth for the event that has the max depth from a set of event IDs
@@ -1035,7 +1111,6 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
if row[1] not in event_results:
queue.put((-row[0], row[1]))
- # Navigate up the DAG by prev_event
txn.execute(query, (event_id, False, limit - len(event_results)))
prev_event_id_results = txn.fetchall()
logger.debug(
@@ -1130,6 +1205,19 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
_delete_old_forward_extrem_cache_txn,
)
+ async def insert_insertion_extremity(self, event_id: str, room_id: str) -> None:
+ await self.db_pool.simple_upsert(
+ table="insertion_event_extremities",
+ keyvalues={"event_id": event_id},
+ values={
+ "event_id": event_id,
+ "room_id": room_id,
+ },
+ insertion_values={},
+ desc="insert_insertion_extremity",
+ lock=False,
+ )
+
async def insert_received_event_to_staging(
self, origin: str, event: EventBase
) -> None:
@@ -1277,6 +1365,100 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
return origin, event
+ async def prune_staged_events_in_room(
+ self,
+ room_id: str,
+ room_version: RoomVersion,
+ ) -> bool:
+ """Checks if there are lots of staged events for the room, and if so
+ prune them down.
+
+ Returns:
+ Whether any events were pruned
+ """
+
+ # First check the size of the queue.
+ count = await self.db_pool.simple_select_one_onecol(
+ table="federation_inbound_events_staging",
+ keyvalues={"room_id": room_id},
+ retcol="COALESCE(COUNT(*), 0)",
+ desc="prune_staged_events_in_room_count",
+ )
+
+ if count < 100:
+ return False
+
+ # If the queue is too large, then we want to clear the entire queue,
+ # keeping only the forward extremities (i.e. the events not referenced
+ # by other events in the queue). We do this so that we can always
+ # backpaginate into all the events we have dropped.
+ rows = await self.db_pool.simple_select_list(
+ table="federation_inbound_events_staging",
+ keyvalues={"room_id": room_id},
+ retcols=("event_id", "event_json"),
+ desc="prune_staged_events_in_room_fetch",
+ )
+
+ # Find the set of events referenced by those in the queue, as well as
+ # collecting all the event IDs in the queue.
+ referenced_events: Set[str] = set()
+ seen_events: Set[str] = set()
+ for row in rows:
+ event_id = row["event_id"]
+ seen_events.add(event_id)
+ event_d = db_to_json(row["event_json"])
+
+ # We don't bother parsing the dicts into full blown event objects,
+ # as that is needlessly expensive.
+
+ # We haven't checked that the `prev_events` have the right format
+ # yet, so we check as we go.
+ prev_events = event_d.get("prev_events", [])
+ if not isinstance(prev_events, list):
+ logger.info("Invalid prev_events for %s", event_id)
+ continue
+
+ if room_version.event_format == EventFormatVersions.V1:
+ for prev_event_tuple in prev_events:
+ if not isinstance(prev_event_tuple, list) or len(prev_events) != 2:
+ logger.info("Invalid prev_events for %s", event_id)
+ break
+
+ prev_event_id = prev_event_tuple[0]
+ if not isinstance(prev_event_id, str):
+ logger.info("Invalid prev_events for %s", event_id)
+ break
+
+ referenced_events.add(prev_event_id)
+ else:
+ for prev_event_id in prev_events:
+ if not isinstance(prev_event_id, str):
+ logger.info("Invalid prev_events for %s", event_id)
+ break
+
+ referenced_events.add(prev_event_id)
+
+ to_delete = referenced_events & seen_events
+ if not to_delete:
+ return False
+
+ pdus_pruned_from_federation_queue.inc(len(to_delete))
+ logger.info(
+ "Pruning %d events in room %s from federation queue",
+ len(to_delete),
+ room_id,
+ )
+
+ await self.db_pool.simple_delete_many(
+ table="federation_inbound_events_staging",
+ keyvalues={"room_id": room_id},
+ iterable=to_delete,
+ column="event_id",
+ desc="prune_staged_events_in_room_delete",
+ )
+
+ return True
+
async def get_all_rooms_with_staged_incoming_events(self) -> List[str]:
"""Get the room IDs of all events currently staged."""
return await self.db_pool.simple_select_onecol(
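
The pruning rule above (delete everything that some other staged event references, keep the queue's forward extremities) is worth a toy illustration:

```python
# event_id -> prev_event_ids, restricted to what is in the staging queue.
staged = {
    "$a": [],
    "$b": ["$a"],
    "$c": ["$b"],
}

seen_events = set(staged)
referenced_events = {p for prevs in staged.values() for p in prevs}

to_delete = referenced_events & seen_events
assert to_delete == {"$a", "$b"}  # "$c" is a forward extremity and survives
```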
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 2046b8e276..6a30aa6f81 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -578,7 +578,13 @@ class PersistEventsStore:
missing_auth_chains.clear()
- for auth_id, event_type, state_key, chain_id, sequence_number in txn:
+ for (
+ auth_id,
+ event_type,
+ state_key,
+ chain_id,
+ sequence_number,
+ ) in txn.fetchall():
event_to_types[auth_id] = (event_type, state_key)
if chain_id is None:
@@ -1382,18 +1388,18 @@ class PersistEventsStore:
# If we're persisting an unredacted event we go and ensure
# that we mark any redactions that reference this event as
# requiring censoring.
- sql = "UPDATE redactions SET have_censored = ? WHERE redacts = ?"
- txn.execute_batch(
- sql,
- (
- (
- False,
- event.event_id,
- )
- for event, _ in events_and_contexts
- if not event.internal_metadata.is_redacted()
- ),
+ unredacted_events = [
+ event.event_id
+ for event, _ in events_and_contexts
+ if not event.internal_metadata.is_redacted()
+ ]
+ sql = "UPDATE redactions SET have_censored = ? WHERE "
+ clause, args = make_in_list_sql_clause(
+ self.database_engine,
+ "redacts",
+ unredacted_events,
)
+ txn.execute(sql + clause, [False] + args)
state_events_and_contexts = [
ec for ec in events_and_contexts if ec[0].is_state()
@@ -1773,10 +1779,21 @@ class PersistEventsStore:
# Not a insertion event
return
- # Skip processing a insertion event if the room version doesn't
- # support it.
+ # Skip processing an insertion event if the room version doesn't
+ # support it or the event is not from the room creator.
room_version = self.store.get_room_version_txn(txn, event.room_id)
- if not room_version.msc2716_historical:
+ room_creator = self.db_pool.simple_select_one_onecol_txn(
+ txn,
+ table="rooms",
+ keyvalues={"room_id": event.room_id},
+ retcol="creator",
+ allow_none=True,
+ )
+ if (
+ not room_version.msc2716_historical
+ or not self.hs.config.experimental.msc2716_enabled
+ or event.sender != room_creator
+ ):
return
next_chunk_id = event.content.get(EventContentFields.MSC2716_NEXT_CHUNK_ID)
@@ -1825,9 +1842,20 @@ class PersistEventsStore:
return
# Skip processing a chunk event if the room version doesn't
- # support it.
+ # support it or the event is not from the room creator.
room_version = self.store.get_room_version_txn(txn, event.room_id)
- if not room_version.msc2716_historical:
+ room_creator = self.db_pool.simple_select_one_onecol_txn(
+ txn,
+ table="rooms",
+ keyvalues={"room_id": event.room_id},
+ retcol="creator",
+ allow_none=True,
+ )
+ if (
+ not room_version.msc2716_historical
+ or not self.hs.config.experimental.msc2716_enabled
+ or event.sender != room_creator
+ ):
return
chunk_id = event.content.get(EventContentFields.MSC2716_CHUNK_ID)
@@ -1848,6 +1876,18 @@ class PersistEventsStore:
},
)
+ # When we receive an event with a `chunk_id` referencing the
+ # `next_chunk_id` of the insertion event, we can remove it from the
+ # `insertion_event_extremities` table.
+ sql = """
+ DELETE FROM insertion_event_extremities WHERE event_id IN (
+ SELECT event_id FROM insertion_events
+ WHERE next_chunk_id = ?
+ )
+ """
+
+ txn.execute(sql, (chunk_id,))
+
def _handle_redaction(self, txn, redacted_event_id):
"""Handles receiving a redaction and checking whether we need to remove
any redacted relations from the database.
@@ -2104,15 +2144,17 @@ class PersistEventsStore:
Forward extremities are handled when we first start persisting the events.
"""
+ # From the events passed in, add all of the prev events as backwards extremities.
+ # Ignore any events that are already backwards extremities or outliers.
query = (
"INSERT INTO event_backward_extremities (event_id, room_id)"
" SELECT ?, ? WHERE NOT EXISTS ("
- " SELECT 1 FROM event_backward_extremities"
- " WHERE event_id = ? AND room_id = ?"
+ " SELECT 1 FROM event_backward_extremities"
+ " WHERE event_id = ? AND room_id = ?"
" )"
" AND NOT EXISTS ("
- " SELECT 1 FROM events WHERE event_id = ? AND room_id = ? "
- " AND outlier = ?"
+ " SELECT 1 FROM events WHERE event_id = ? AND room_id = ? "
+ " AND outlier = ?"
" )"
)
@@ -2126,6 +2168,8 @@ class PersistEventsStore:
],
)
+ # Delete all these events that we've already fetched and now know that their
+ # prev events are the new backwards extremities.
query = (
"DELETE FROM event_backward_extremities"
" WHERE event_id = ? AND room_id = ?"
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 3c86adab56..9501f00f3b 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -14,7 +14,6 @@
import logging
import threading
-from collections import namedtuple
from typing import (
Collection,
Container,
@@ -27,6 +26,7 @@ from typing import (
overload,
)
+import attr
from constantly import NamedConstant, Names
from typing_extensions import Literal
@@ -42,7 +42,11 @@ from synapse.api.room_versions import (
from synapse.events import EventBase, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.events.utils import prune_event
-from synapse.logging.context import PreserveLoggingContext, current_context
+from synapse.logging.context import (
+ PreserveLoggingContext,
+ current_context,
+ make_deferred_yieldable,
+)
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
@@ -56,6 +60,8 @@ from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import JsonDict, get_domain_from_id
+from synapse.util import unwrapFirstError
+from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache
from synapse.util.iterutils import batch_iter
@@ -74,7 +80,10 @@ EVENT_QUEUE_ITERATIONS = 3 # No. times we block waiting for requests for events
EVENT_QUEUE_TIMEOUT_S = 0.1 # Timeout when waiting for requests for events
-_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
+@attr.s(slots=True, auto_attribs=True)
+class _EventCacheEntry:
+ event: EventBase
+ redacted_event: Optional[EventBase]
class EventRedactBehaviour(Names):
@@ -161,6 +170,13 @@ class EventsWorkerStore(SQLBaseStore):
max_size=hs.config.caches.event_cache_size,
)
+ # Map from event ID to a deferred that will result in a map from event
+ # ID to cache entry. Note that the returned dict may not have the
+ # requested event in it if the event isn't in the DB.
+ self._current_event_fetches: Dict[
+ str, ObservableDeferred[Dict[str, _EventCacheEntry]]
+ ] = {}
+
self._event_fetch_lock = threading.Condition()
self._event_fetch_list = []
self._event_fetch_ongoing = 0
@@ -476,7 +492,9 @@ class EventsWorkerStore(SQLBaseStore):
return events
- async def _get_events_from_cache_or_db(self, event_ids, allow_rejected=False):
+ async def _get_events_from_cache_or_db(
+ self, event_ids: Iterable[str], allow_rejected: bool = False
+ ) -> Dict[str, _EventCacheEntry]:
"""Fetch a bunch of events from the cache or the database.
If events are pulled from the database, they will be cached for future lookups.
@@ -485,53 +503,124 @@ class EventsWorkerStore(SQLBaseStore):
Args:
- event_ids (Iterable[str]): The event_ids of the events to fetch
+ event_ids: The event_ids of the events to fetch
- allow_rejected (bool): Whether to include rejected events. If False,
+ allow_rejected: Whether to include rejected events. If False,
rejected events are omitted from the response.
Returns:
- Dict[str, _EventCacheEntry]:
- map from event id to result
+ map from event id to result
"""
event_entry_map = self._get_events_from_cache(
- event_ids, allow_rejected=allow_rejected
+ event_ids,
)
- missing_events_ids = [e for e in event_ids if e not in event_entry_map]
+ missing_events_ids = {e for e in event_ids if e not in event_entry_map}
+
+ # We now look up if we're already fetching some of the events in the DB,
+ # if so we wait for those lookups to finish instead of pulling the same
+ # events out of the DB multiple times.
+ #
+ # Note: we might get the same `ObservableDeferred` back for multiple
+ # events we're already fetching, so we deduplicate the deferreds to
+ # avoid extraneous work (if we don't do this we can end up in an n^2 mode
+ # when we wait on the same Deferred N times, then try and merge the
+ # same dict into itself N times).
+ already_fetching_ids: Set[str] = set()
+ already_fetching_deferreds: Set[
+ ObservableDeferred[Dict[str, _EventCacheEntry]]
+ ] = set()
+
+ for event_id in missing_events_ids:
+ deferred = self._current_event_fetches.get(event_id)
+ if deferred is not None:
+ # We're already pulling the event out of the DB. Add the deferred
+ # to the collection of deferreds to wait on.
+ already_fetching_ids.add(event_id)
+ already_fetching_deferreds.add(deferred)
+
+ missing_events_ids.difference_update(already_fetching_ids)
if missing_events_ids:
log_ctx = current_context()
log_ctx.record_event_fetch(len(missing_events_ids))
+ # Add entries to `self._current_event_fetches` for each event we're
+ # going to pull from the DB. We use a single deferred that resolves
+ # to all the events we pulled from the DB (this will result in this
+ # function returning more events than requested, but that can happen
+ # already due to `_get_events_from_db`).
+ fetching_deferred: ObservableDeferred[
+ Dict[str, _EventCacheEntry]
+ ] = ObservableDeferred(defer.Deferred())
+ for event_id in missing_events_ids:
+ self._current_event_fetches[event_id] = fetching_deferred
+
# Note that _get_events_from_db is also responsible for turning db rows
# into FrozenEvents (via _get_event_from_row), which involves seeing if
# the events have been redacted, and if so pulling the redaction event out
# of the database to check it.
#
- missing_events = await self._get_events_from_db(
- missing_events_ids, allow_rejected=allow_rejected
- )
+ try:
+ missing_events = await self._get_events_from_db(
+ missing_events_ids,
+ )
+
+ event_entry_map.update(missing_events)
+ except Exception as e:
+ with PreserveLoggingContext():
+ fetching_deferred.errback(e)
+ raise e
+ finally:
+ # Ensure that we mark these events as no longer being fetched.
+ for event_id in missing_events_ids:
+ self._current_event_fetches.pop(event_id, None)
+
+ with PreserveLoggingContext():
+ fetching_deferred.callback(missing_events)
+
+ if already_fetching_deferreds:
+ # Wait for the other event requests to finish and add their results
+ # to ours.
+ results = await make_deferred_yieldable(
+ defer.gatherResults(
+ (d.observe() for d in already_fetching_deferreds),
+ consumeErrors=True,
+ )
+ ).addErrback(unwrapFirstError)
+
+ for result in results:
+ # We filter out events that we haven't asked for as we might get
+ # a *lot* of superfluous events back, and there is no point
+ # going through and inserting them all (which can take time).
+ event_entry_map.update(
+ (event_id, entry)
+ for event_id, entry in result.items()
+ if event_id in already_fetching_ids
+ )
- event_entry_map.update(missing_events)
+ if not allow_rejected:
+ event_entry_map = {
+ event_id: entry
+ for event_id, entry in event_entry_map.items()
+ if not entry.event.rejected_reason
+ }
return event_entry_map
def _invalidate_get_event_cache(self, event_id):
self._get_event_cache.invalidate((event_id,))
- def _get_events_from_cache(self, events, allow_rejected, update_metrics=True):
- """Fetch events from the caches
+ def _get_events_from_cache(
+ self, events: Iterable[str], update_metrics: bool = True
+ ) -> Dict[str, _EventCacheEntry]:
+ """Fetch events from the caches.
- Args:
- events (Iterable[str]): list of event_ids to fetch
- allow_rejected (bool): Whether to return events that were rejected
- update_metrics (bool): Whether to update the cache hit ratio metrics
+ May return rejected events.
- Returns:
- dict of event_id -> _EventCacheEntry for each event_id in cache. If
- allow_rejected is `False` then there will still be an entry but it
- will be `None`
+ Args:
+ events: list of event_ids to fetch
+ update_metrics: Whether to update the cache hit ratio metrics
"""
event_map = {}
@@ -542,10 +631,7 @@ class EventsWorkerStore(SQLBaseStore):
if not ret:
continue
- if allow_rejected or not ret.event.rejected_reason:
- event_map[event_id] = ret
- else:
- event_map[event_id] = None
+ event_map[event_id] = ret
return event_map
@@ -672,23 +758,23 @@ class EventsWorkerStore(SQLBaseStore):
with PreserveLoggingContext():
self.hs.get_reactor().callFromThread(fire, event_list, e)
- async def _get_events_from_db(self, event_ids, allow_rejected=False):
+ async def _get_events_from_db(
+ self, event_ids: Iterable[str]
+ ) -> Dict[str, _EventCacheEntry]:
"""Fetch a bunch of events from the database.
+ May return rejected events.
+
Returned events will be added to the cache for future lookups.
Unknown events are omitted from the response.
Args:
- event_ids (Iterable[str]): The event_ids of the events to fetch
-
- allow_rejected (bool): Whether to include rejected events. If False,
- rejected events are omitted from the response.
+ event_ids: The event_ids of the events to fetch
Returns:
- Dict[str, _EventCacheEntry]:
- map from event id to result. May return extra events which
- weren't asked for.
+ map from event id to result. May return extra events which
+ weren't asked for.
"""
fetched_events = {}
events_to_fetch = event_ids
@@ -717,9 +803,6 @@ class EventsWorkerStore(SQLBaseStore):
rejected_reason = row["rejected_reason"]
- if not allow_rejected and rejected_reason:
- continue
-
# If the event or metadata cannot be parsed, log the error and act
# as if the event is unknown.
try:
diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py
index 1388771c40..12cf6995eb 100644
--- a/synapse/storage/databases/main/presence.py
+++ b/synapse/storage/databases/main/presence.py
@@ -29,7 +29,26 @@ if TYPE_CHECKING:
from synapse.server import HomeServer
-class PresenceStore(SQLBaseStore):
+class PresenceBackgroundUpdateStore(SQLBaseStore):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: Connection,
+ hs: "HomeServer",
+ ):
+ super().__init__(database, db_conn, hs)
+
+ # Used by `PresenceStore._get_active_presence()`
+ self.db_pool.updates.register_background_index_update(
+ "presence_stream_not_offline_index",
+ index_name="presence_stream_state_not_offline_idx",
+ table="presence_stream",
+ columns=["state"],
+ where_clause="state != 'offline'",
+ )
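+
+ # Editorial note: the background update registered above builds (roughly)
+ # this partial index, illustratively; on PostgreSQL the index is created
+ # CONCURRENTLY:
+ #
+ # CREATE INDEX presence_stream_state_not_offline_idx
+ # ON presence_stream (state) WHERE state != 'offline';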
+
+
+class PresenceStore(PresenceBackgroundUpdateStore):
def __init__(
self,
database: DatabasePool,
@@ -332,6 +351,8 @@ class PresenceStore(SQLBaseStore):
the appropriate time outs.
"""
+ # The `presence_stream_state_not_offline_idx` index should be used for this
+ # query.
sql = (
"SELECT user_id, state, last_active_ts, last_federation_update_ts,"
" last_user_sync_ts, status_msg, currently_active FROM presence_stream"
diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
index 664c65dac5..bccff5e5b9 100644
--- a/synapse/storage/databases/main/purge_events.py
+++ b/synapse/storage/databases/main/purge_events.py
@@ -295,6 +295,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
self._invalidate_cache_and_stream(
txn, self.have_seen_event, (room_id, event_id)
)
+ self._invalidate_get_event_cache(event_id)
logger.info("[purge] done")
diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py
index b48fe086d4..63ac09c61d 100644
--- a/synapse/storage/databases/main/pusher.py
+++ b/synapse/storage/databases/main/pusher.py
@@ -48,6 +48,11 @@ class PusherWorkerStore(SQLBaseStore):
self._remove_stale_pushers,
)
+ self.db_pool.updates.register_background_update_handler(
+ "remove_deleted_email_pushers",
+ self._remove_deleted_email_pushers,
+ )
+
def _decode_pushers_rows(self, rows: Iterable[dict]) -> Iterator[PusherConfig]:
"""JSON-decode the data in the rows returned from the `pushers` table
@@ -388,6 +393,74 @@ class PusherWorkerStore(SQLBaseStore):
return number_deleted
+ async def _remove_deleted_email_pushers(
+ self, progress: dict, batch_size: int
+ ) -> int:
+ """A background update that deletes all pushers for deleted email addresses.
+
+ In previous versions of Synapse, deleting a user's email address did not
+ also delete the pushers for that address. This background update removes
+ those pushers to prevent unwanted emails. It should only need to run once
+ (when users upgrade to v1.42.0).
+
+ Args:
+ progress: dict used to store progress of this background update
+ batch_size: the maximum number of rows to retrieve in a single select query
+
+ Returns:
+ The number of deleted rows
+ """
+
+ last_pusher = progress.get("last_pusher", 0)
+
+ def _delete_pushers(txn) -> int:
+ sql = """
+ SELECT p.id, p.user_name, p.app_id, p.pushkey
+ FROM pushers AS p
+ LEFT JOIN user_threepids AS t
+ ON t.user_id = p.user_name
+ AND t.medium = 'email'
+ AND t.address = p.pushkey
+ WHERE t.user_id IS NULL
+ AND p.app_id = 'm.email'
+ AND p.id > ?
+ ORDER BY p.id ASC
+ LIMIT ?
+ """
+
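+ # i.e. select email pushers whose address no longer has a matching
+ # email threepid for that user: the LEFT JOIN leaves t.user_id NULL
+ # for the orphaned rows.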
+ txn.execute(sql, (last_pusher, batch_size))
+ rows = txn.fetchall()
+
+ last = None
+ num_deleted = 0
+ for row in rows:
+ last = row[0]
+ num_deleted += 1
+ self.db_pool.simple_delete_txn(
+ txn,
+ "pushers",
+ {"user_name": row[1], "app_id": row[2], "pushkey": row[3]},
+ )
+
+ if last is not None:
+ self.db_pool.updates._background_update_progress_txn(
+ txn, "remove_deleted_email_pushers", {"last_pusher": last}
+ )
+
+ return num_deleted
+
+ number_deleted = await self.db_pool.runInteraction(
+ "_remove_deleted_email_pushers", _delete_pushers
+ )
+
+ if number_deleted < batch_size:
+ await self.db_pool.updates._end_background_update(
+ "remove_deleted_email_pushers"
+ )
+
+ return number_deleted
+
class PusherStore(PusherWorkerStore):
def get_pushers_stream_token(self) -> int:
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 6ad1a0cf7f..a6517962f6 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -29,7 +29,7 @@ from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.types import Connection, Cursor
from synapse.storage.util.id_generators import IdGenerator
from synapse.storage.util.sequence import build_sequence_generator
-from synapse.types import UserID
+from synapse.types import UserID, UserInfo
from synapse.util.caches.descriptors import cached
if TYPE_CHECKING:
@@ -146,6 +146,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
@cached()
async def get_user_by_id(self, user_id: str) -> Optional[Dict[str, Any]]:
+ """Deprecated: use get_userinfo_by_id instead"""
return await self.db_pool.simple_select_one(
table="users",
keyvalues={"name": user_id},
@@ -166,6 +167,33 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
desc="get_user_by_id",
)
+ async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]:
+ """Get a UserInfo object for a user by user ID.
+
+ Note! Currently uses the cache of `get_user_by_id`. Once that deprecated method is removed,
+ this method should be cached.
+
+ Args:
+ user_id: The user to fetch user info for.
+ Returns:
+ `UserInfo` object if user found, otherwise `None`.
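+
+ Example (illustrative caller code):
+ info = await store.get_userinfo_by_id("@alice:example.com")
+ if info is not None and info.is_admin:
+ ... # e.g. allow an admin-only action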
+ """
+ user_data = await self.get_user_by_id(user_id)
+ if not user_data:
+ return None
+ return UserInfo(
+ appservice_id=user_data["appservice_id"],
+ consent_server_notice_sent=user_data["consent_server_notice_sent"],
+ consent_version=user_data["consent_version"],
+ creation_ts=user_data["creation_ts"],
+ is_admin=bool(user_data["admin"]),
+ is_deactivated=bool(user_data["deactivated"]),
+ is_guest=bool(user_data["is_guest"]),
+ is_shadow_banned=bool(user_data["shadow_banned"]),
+ user_id=UserID.from_string(user_data["name"]),
+ user_type=user_data["user_type"],
+ )
+
async def is_trial_user(self, user_id: str) -> bool:
"""Checks if user is in the "trial" period, i.e. within the first
N days of registration defined by `mau_trial_days` config
@@ -571,6 +599,28 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
desc="record_user_external_id",
)
+ async def remove_user_external_id(
+ self, auth_provider: str, external_id: str, user_id: str
+ ) -> None:
+ """Remove a mapping from an external user id to a mxid
+
+ If the mapping is not found, this method does nothing.
+
+ Args:
+ auth_provider: identifier for the remote auth provider
+ external_id: id on that system
+ user_id: complete mxid that it is mapped to
+ """
+ await self.db_pool.simple_delete(
+ table="user_external_ids",
+ keyvalues={
+ "auth_provider": auth_provider,
+ "external_id": external_id,
+ "user_id": user_id,
+ },
+ desc="remove_user_external_id",
+ )
+
async def get_user_by_external_id(
self, auth_provider: str, external_id: str
) -> Optional[str]:
@@ -704,16 +754,18 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
)
return user_id
- def get_user_id_by_threepid_txn(self, txn, medium, address):
+ def get_user_id_by_threepid_txn(
+ self, txn, medium: str, address: str
+ ) -> Optional[str]:
"""Returns user id from threepid
Args:
txn (cursor):
- medium (str): threepid medium e.g. email
- address (str): threepid address e.g. me@example.com
+ medium: threepid medium e.g. email
+ address: threepid address e.g. me@example.com
Returns:
- str|None: user id or None if no user id/threepid mapping exists
+ user id, or None if no user id/threepid mapping exists
"""
ret = self.db_pool.simple_select_one_txn(
txn,
@@ -726,14 +778,21 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
return ret["user_id"]
return None
- async def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
+ async def user_add_threepid(
+ self,
+ user_id: str,
+ medium: str,
+ address: str,
+ validated_at: int,
+ added_at: int,
+ ) -> None:
await self.db_pool.simple_upsert(
"user_threepids",
{"medium": medium, "address": address},
{"user_id": user_id, "validated_at": validated_at, "added_at": added_at},
)
- async def user_get_threepids(self, user_id):
+ async def user_get_threepids(self, user_id: str) -> List[Dict[str, Any]]:
return await self.db_pool.simple_select_list(
"user_threepids",
{"user_id": user_id},
@@ -741,7 +800,9 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
"user_get_threepids",
)
- async def user_delete_threepid(self, user_id, medium, address) -> None:
+ async def user_delete_threepid(
+ self, user_id: str, medium: str, address: str
+ ) -> None:
await self.db_pool.simple_delete(
"user_threepids",
keyvalues={"user_id": user_id, "medium": medium, "address": address},
@@ -1107,6 +1168,322 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
desc="update_access_token_last_validated",
)
+ async def registration_token_is_valid(self, token: str) -> bool:
+ """Checks if a token can be used to authenticate a registration.
+
+ Args:
+ token: The registration token to be checked
+ Returns:
+ True if the token is valid, False otherwise.
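+
+ Example (illustrative): a token row with uses_allowed=1, pending=0
+ and completed=1 is exhausted, so:
+
+ assert not await store.registration_token_is_valid("used-up-token")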
+ """
+ res = await self.db_pool.simple_select_one(
+ "registration_tokens",
+ keyvalues={"token": token},
+ retcols=["uses_allowed", "pending", "completed", "expiry_time"],
+ allow_none=True,
+ )
+
+ # Check if the token exists
+ if res is None:
+ return False
+
+ # Check if the token has expired
+ now = self._clock.time_msec()
+ if res["expiry_time"] and res["expiry_time"] < now:
+ return False
+
+ # Check if the token has been used up
+ if (
+ res["uses_allowed"] is not None
+ and res["pending"] + res["completed"] >= res["uses_allowed"]
+ ):
+ return False
+
+ # Otherwise, the token is valid
+ return True
+
+ async def set_registration_token_pending(self, token: str) -> None:
+ """Increment the pending registrations counter for a token.
+
+ Args:
+ token: The registration token pending use
+ """
+
+ def _set_registration_token_pending_txn(txn):
+ pending = self.db_pool.simple_select_one_onecol_txn(
+ txn,
+ "registration_tokens",
+ keyvalues={"token": token},
+ retcol="pending",
+ )
+ self.db_pool.simple_update_one_txn(
+ txn,
+ "registration_tokens",
+ keyvalues={"token": token},
+ updatevalues={"pending": pending + 1},
+ )
+
+ return await self.db_pool.runInteraction(
+ "set_registration_token_pending", _set_registration_token_pending_txn
+ )
+
+ async def use_registration_token(self, token: str) -> None:
+ """Complete a use of the given registration token.
+
+ The `pending` counter will be decremented, and the `completed`
+ counter will be incremented.
+
+ Args:
+ token: The registration token to be 'used'
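+
+ Example lifecycle (illustrative):
+ await store.set_registration_token_pending(token) # pending += 1
+ # ... the registration flow completes ...
+ await store.use_registration_token(token) # pending -= 1, completed += 1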
+ """
+
+ def _use_registration_token_txn(txn):
+ # Normally, res is Optional[Dict[str, Any]].
+ # Override type because the return type is only optional if
+ # allow_none is True, and we don't want mypy throwing errors
+ # about None not being indexable.
+ res: Dict[str, Any] = self.db_pool.simple_select_one_txn(
+ txn,
+ "registration_tokens",
+ keyvalues={"token": token},
+ retcols=["pending", "completed"],
+ ) # type: ignore
+
+ # Decrement pending and increment completed
+ self.db_pool.simple_update_one_txn(
+ txn,
+ "registration_tokens",
+ keyvalues={"token": token},
+ updatevalues={
+ "completed": res["completed"] + 1,
+ "pending": res["pending"] - 1,
+ },
+ )
+
+ return await self.db_pool.runInteraction(
+ "use_registration_token", _use_registration_token_txn
+ )
+
+ async def get_registration_tokens(
+ self, valid: Optional[bool] = None
+ ) -> List[Dict[str, Any]]:
+ """List all registration tokens. Used by the admin API.
+
+ Args:
+ valid: If True, only valid tokens are returned.
+ If False, only invalid tokens are returned.
+ Default is None: return all tokens regardless of validity.
+
+ Returns:
+ A list of dicts, each containing details of a token.
+ """
+
+ def select_registration_tokens_txn(txn, now: int, valid: Optional[bool]):
+ if valid is None:
+ # Return all tokens regardless of validity
+ txn.execute("SELECT * FROM registration_tokens")
+
+ elif valid:
+ # Select valid tokens only
+ sql = (
+ "SELECT * FROM registration_tokens WHERE "
+ "(uses_allowed > pending + completed OR uses_allowed IS NULL) "
+ "AND (expiry_time > ? OR expiry_time IS NULL)"
+ )
+ txn.execute(sql, [now])
+
+ else:
+ # Select invalid tokens only
+ sql = (
+ "SELECT * FROM registration_tokens WHERE "
+ "uses_allowed <= pending + completed OR expiry_time <= ?"
+ )
+ txn.execute(sql, [now])
+
+ return self.db_pool.cursor_to_dict(txn)
+
+ return await self.db_pool.runInteraction(
+ "select_registration_tokens",
+ select_registration_tokens_txn,
+ self._clock.time_msec(),
+ valid,
+ )
+
+ async def get_one_registration_token(self, token: str) -> Optional[Dict[str, Any]]:
+ """Get info about the given registration token. Used by the admin API.
+
+ Args:
+ token: The token to retrieve information about.
+
+ Returns:
+ A dict, or None if token doesn't exist.
+ """
+ return await self.db_pool.simple_select_one(
+ "registration_tokens",
+ keyvalues={"token": token},
+ retcols=["token", "uses_allowed", "pending", "completed", "expiry_time"],
+ allow_none=True,
+ desc="get_one_registration_token",
+ )
+
+ async def generate_registration_token(
+ self, length: int, chars: str
+ ) -> str:
+ """Generate a random registration token. Used by the admin API.
+
+ Args:
+ length: The length of the token to generate.
+ chars: A string of the characters allowed in the generated token.
+
+ Returns:
+ The generated token.
+
+ Raises:
+ SynapseError if a unique registration token cannot be generated
+ after a few attempts.
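+
+ Example (illustrative):
+ import string
+ token = await store.generate_registration_token(
+ length=16, chars=string.ascii_letters + string.digits
+ )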
+ """
+ # Make a few attempts at generating a unique token of the required
+ # length before failing.
+ for _i in range(3):
+ # Generate token
+ token = "".join(random.choices(chars, k=length))
+
+ # Check if the token already exists
+ existing_token = await self.db_pool.simple_select_one_onecol(
+ "registration_tokens",
+ keyvalues={"token": token},
+ retcol="token",
+ allow_none=True,
+ desc="check_if_registration_token_exists",
+ )
+
+ if existing_token is None:
+ # The generated token doesn't exist yet, return it
+ return token
+
+ raise SynapseError(
+ 500,
+ "Unable to generate a unique registration token. Try again with a greater length",
+ Codes.UNKNOWN,
+ )
+
+ async def create_registration_token(
+ self, token: str, uses_allowed: Optional[int], expiry_time: Optional[int]
+ ) -> bool:
+ """Create a new registration token. Used by the admin API.
+
+ Args:
+ token: The token to create.
+ uses_allowed: The number of times the token can be used to complete
+ a registration before it becomes invalid. A value of None indicates
+ unlimited uses.
+ expiry_time: The latest time the token is valid. Given as the
+ number of milliseconds since 1970-01-01 00:00:00 UTC. A value of
+ None indicates that the token does not expire.
+
+ Returns:
+ Whether the row was inserted or not.
+ """
+
+ def _create_registration_token_txn(txn):
+ row = self.db_pool.simple_select_one_txn(
+ txn,
+ "registration_tokens",
+ keyvalues={"token": token},
+ retcols=["token"],
+ allow_none=True,
+ )
+
+ if row is not None:
+ # Token already exists
+ return False
+
+ self.db_pool.simple_insert_txn(
+ txn,
+ "registration_tokens",
+ values={
+ "token": token,
+ "uses_allowed": uses_allowed,
+ "pending": 0,
+ "completed": 0,
+ "expiry_time": expiry_time,
+ },
+ )
+
+ return True
+
+ return await self.db_pool.runInteraction(
+ "create_registration_token", _create_registration_token_txn
+ )
+
+ async def update_registration_token(
+ self, token: str, updatevalues: Dict[str, Optional[int]]
+ ) -> Optional[Dict[str, Any]]:
+ """Update a registration token. Used by the admin API.
+
+ Args:
+ token: The token to update.
+ updatevalues: A dict with the fields to update. E.g.:
+ `{"uses_allowed": 3}` to update just uses_allowed, or
+ `{"uses_allowed": 3, "expiry_time": None}` to update both.
+ This is passed straight to simple_update_one.
+
+ Returns:
+ A dict with all info about the token, or None if token doesn't exist.
+ """
+
+ def _update_registration_token_txn(txn):
+ try:
+ self.db_pool.simple_update_one_txn(
+ txn,
+ "registration_tokens",
+ keyvalues={"token": token},
+ updatevalues=updatevalues,
+ )
+ except StoreError:
+ # Update failed because token does not exist
+ return None
+
+ # Get all info about the token so it can be sent in the response
+ return self.db_pool.simple_select_one_txn(
+ txn,
+ "registration_tokens",
+ keyvalues={"token": token},
+ retcols=[
+ "token",
+ "uses_allowed",
+ "pending",
+ "completed",
+ "expiry_time",
+ ],
+ allow_none=True,
+ )
+
+ return await self.db_pool.runInteraction(
+ "update_registration_token", _update_registration_token_txn
+ )
+
+ async def delete_registration_token(self, token: str) -> bool:
+ """Delete a registration token. Used by the admin API.
+
+ Args:
+ token: The token to delete.
+
+ Returns:
+ Whether the token was successfully deleted or not.
+ """
+ try:
+ await self.db_pool.simple_delete_one(
+ "registration_tokens",
+ keyvalues={"token": token},
+ desc="delete_registration_token",
+ )
+ except StoreError:
+ # Deletion failed because token does not exist
+ return False
+
+ return True
+
@cached()
async def mark_access_token_as_used(self, token_id: int) -> None:
"""
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 443e5f3315..6e7312266d 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -19,9 +19,10 @@ from abc import abstractmethod
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
-from synapse.api.constants import EventTypes, JoinRules
+from synapse.api.constants import EventContentFields, EventTypes, JoinRules
from synapse.api.errors import StoreError
from synapse.api.room_versions import RoomVersion, RoomVersions
+from synapse.events import EventBase
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.search import SearchStore
@@ -73,6 +74,40 @@ class RoomWorkerStore(SQLBaseStore):
self.config = hs.config
+ async def store_room(
+ self,
+ room_id: str,
+ room_creator_user_id: str,
+ is_public: bool,
+ room_version: RoomVersion,
+ ):
+ """Stores a room.
+
+ Args:
+ room_id: The room ID to store.
+ room_creator_user_id: The user ID of the room creator.
+ is_public: True to indicate that this room should appear in
+ public room lists.
+ room_version: The version of the room
+ Raises:
+ StoreError if the room could not be stored.
+ """
+ try:
+ await self.db_pool.simple_insert(
+ "rooms",
+ {
+ "room_id": room_id,
+ "creator": room_creator_user_id,
+ "is_public": is_public,
+ "room_version": room_version.identifier,
+ "has_auth_chain_index": True,
+ },
+ desc="store_room",
+ )
+ except Exception as e:
+ logger.error("store_room with room_id=%s failed: %s", room_id, e)
+ raise StoreError(500, "Problem creating room.")
+
async def get_room(self, room_id: str) -> dict:
"""Retrieve a room.
@@ -890,55 +925,6 @@ class RoomWorkerStore(SQLBaseStore):
return total_media_quarantined
- async def get_all_new_public_rooms(
- self, instance_name: str, last_id: int, current_id: int, limit: int
- ) -> Tuple[List[Tuple[int, tuple]], int, bool]:
- """Get updates for public rooms replication stream.
-
- Args:
- instance_name: The writer we want to fetch updates from. Unused
- here since there is only ever one writer.
- last_id: The token to fetch updates from. Exclusive.
- current_id: The token to fetch updates up to. Inclusive.
- limit: The requested limit for the number of rows to return. The
- function may return more or fewer rows.
-
- Returns:
- A tuple consisting of: the updates, a token to use to fetch
- subsequent updates, and whether we returned fewer rows than exists
- between the requested tokens due to the limit.
-
- The token returned can be used in a subsequent call to this
- function to get further updatees.
-
- The updates are a list of 2-tuples of stream ID and the row data
- """
- if last_id == current_id:
- return [], current_id, False
-
- def get_all_new_public_rooms(txn):
- sql = """
- SELECT stream_id, room_id, visibility, appservice_id, network_id
- FROM public_room_list_stream
- WHERE stream_id > ? AND stream_id <= ?
- ORDER BY stream_id ASC
- LIMIT ?
- """
-
- txn.execute(sql, (last_id, current_id, limit))
- updates = [(row[0], row[1:]) for row in txn]
- limited = False
- upto_token = current_id
- if len(updates) >= limit:
- upto_token = updates[-1][0]
- limited = True
-
- return updates, upto_token, limited
-
- return await self.db_pool.runInteraction(
- "get_all_new_public_rooms", get_all_new_public_rooms
- )
-
async def get_rooms_for_retention_period_in_range(
self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False
) -> Dict[str, dict]:
@@ -1028,6 +1014,7 @@ class _BackgroundUpdates:
ADD_ROOMS_ROOM_VERSION_COLUMN = "add_rooms_room_version_column"
POPULATE_ROOM_DEPTH_MIN_DEPTH2 = "populate_room_depth_min_depth2"
REPLACE_ROOM_DEPTH_MIN_DEPTH = "replace_room_depth_min_depth"
+ POPULATE_ROOMS_CREATOR_COLUMN = "populate_rooms_creator_column"
_REPLACE_ROOM_DEPTH_SQL_COMMANDS = (
@@ -1069,6 +1056,11 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
self._background_replace_room_depth_min_depth,
)
+ self.db_pool.updates.register_background_update_handler(
+ _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN,
+ self._background_populate_rooms_creator_column,
+ )
+
async def _background_insert_retention(self, progress, batch_size):
"""Retrieves a list of all rooms within a range and inserts an entry for each of
them into the room_retention table.
@@ -1288,7 +1280,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
keyvalues={"room_id": room_id},
retcol="MAX(stream_ordering)",
allow_none=True,
- desc="upsert_room_on_join",
+ desc="has_auth_chain_index_fallback",
)
return max_ordering is None
@@ -1358,6 +1350,65 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
return 0
+ async def _background_populate_rooms_creator_column(
+ self, progress: dict, batch_size: int
+ ):
+ """Background update to go and add creator information to `rooms`
+ table from `current_state_events` table.
+ """
+
+ last_room_id = progress.get("room_id", "")
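+ # e.g. progress is {"room_id": "!last-processed:example.com"} when resuming
+ # a previous run; the empty-string default starts from the beginning
+ # (illustrative value).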
+
+ def _background_populate_rooms_creator_column_txn(txn: LoggingTransaction):
+ sql = """
+ SELECT room_id, json FROM event_json
+ INNER JOIN rooms AS room USING (room_id)
+ INNER JOIN current_state_events AS state_event USING (room_id, event_id)
+ WHERE room_id > ?
+ AND (room.creator IS NULL OR room.creator = '')
+ AND state_event.type = 'm.room.create'
+ AND state_event.state_key = ''
+ ORDER BY room_id
+ LIMIT ?
+ """
+
+ txn.execute(sql, (last_room_id, batch_size))
+ room_id_to_create_event_results = txn.fetchall()
+
+ new_last_room_id = ""
+ for room_id, event_json in room_id_to_create_event_results:
+ event_dict = db_to_json(event_json)
+
+ creator = event_dict.get("content", {}).get(EventContentFields.ROOM_CREATOR)
+
+ self.db_pool.simple_update_txn(
+ txn,
+ table="rooms",
+ keyvalues={"room_id": room_id},
+ updatevalues={"creator": creator},
+ )
+ new_last_room_id = room_id
+
+ if new_last_room_id == "":
+ return True
+
+ self.db_pool.updates._background_update_progress_txn(
+ txn,
+ _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN,
+ {"room_id": new_last_room_id},
+ )
+
+ return False
+
+ end = await self.db_pool.runInteraction(
+ "_background_populate_rooms_creator_column",
+ _background_populate_rooms_creator_column_txn,
+ )
+
+ if end:
+ await self.db_pool.updates._end_background_update(
+ _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN
+ )
+
+ return batch_size
+
class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
def __init__(self, database: DatabasePool, db_conn, hs):
@@ -1365,7 +1416,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
self.config = hs.config
- async def upsert_room_on_join(self, room_id: str, room_version: RoomVersion):
+ async def upsert_room_on_join(
+ self, room_id: str, room_version: RoomVersion, auth_events: List[EventBase]
+ ):
"""Ensure that the room is stored in the table
Called when we join a room over federation, and overwrites any room version
@@ -1376,6 +1429,24 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
# mark the room as having an auth chain cover index.
has_auth_chain_index = await self.has_auth_chain_index(room_id)
+ create_event = None
+ for e in auth_events:
+ if (e.type, e.state_key) == (EventTypes.Create, ""):
+ create_event = e
+ break
+
+ if create_event is None:
+ # If the state doesn't have a create event then the room is
+ # invalid, and it would fail auth checks anyway.
+ raise StoreError(400, "No create event in state")
+
+ room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
+
+ if not isinstance(room_creator, str):
+ # If the create event does not have a creator then the room is
+ # invalid, and it would fail auth checks anyway.
+ raise StoreError(400, "No creator defined on the create event")
+
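+ # For reference, the create event's content looks something like this
+ # (illustrative values):
+ # {"creator": "@alice:example.com", "room_version": "6"}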
await self.db_pool.simple_upsert(
desc="upsert_room_on_join",
table="rooms",
@@ -1383,7 +1454,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
values={"room_version": room_version.identifier},
insertion_values={
"is_public": False,
- "creator": "",
+ "creator": room_creator,
"has_auth_chain_index": has_auth_chain_index,
},
# rooms has a unique constraint on room_id, so no need to lock when doing an
@@ -1391,57 +1462,6 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
lock=False,
)
- async def store_room(
- self,
- room_id: str,
- room_creator_user_id: str,
- is_public: bool,
- room_version: RoomVersion,
- ):
- """Stores a room.
-
- Args:
- room_id: The desired room ID, can be None.
- room_creator_user_id: The user ID of the room creator.
- is_public: True to indicate that this room should appear in
- public room lists.
- room_version: The version of the room
- Raises:
- StoreError if the room could not be stored.
- """
- try:
-
- def store_room_txn(txn, next_id):
- self.db_pool.simple_insert_txn(
- txn,
- "rooms",
- {
- "room_id": room_id,
- "creator": room_creator_user_id,
- "is_public": is_public,
- "room_version": room_version.identifier,
- "has_auth_chain_index": True,
- },
- )
- if is_public:
- self.db_pool.simple_insert_txn(
- txn,
- table="public_room_list_stream",
- values={
- "stream_id": next_id,
- "room_id": room_id,
- "visibility": is_public,
- },
- )
-
- async with self._public_room_id_gen.get_next() as next_id:
- await self.db_pool.runInteraction(
- "store_room_txn", store_room_txn, next_id
- )
- except Exception as e:
- logger.error("store_room with room_id=%s failed: %s", room_id, e)
- raise StoreError(500, "Problem creating room.")
-
async def maybe_store_room_on_outlier_membership(
self, room_id: str, room_version: RoomVersion
):
@@ -1462,6 +1482,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
insertion_values={
"room_version": room_version.identifier,
"is_public": False,
+ # We don't worry about setting the `creator` here because
+ # we don't process any messages in a room while a user is
+ # invited (only after the join).
"creator": "",
"has_auth_chain_index": has_auth_chain_index,
},
@@ -1470,49 +1493,14 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
lock=False,
)
- async def set_room_is_public(self, room_id, is_public):
- def set_room_is_public_txn(txn, next_id):
- self.db_pool.simple_update_one_txn(
- txn,
- table="rooms",
- keyvalues={"room_id": room_id},
- updatevalues={"is_public": is_public},
- )
-
- entries = self.db_pool.simple_select_list_txn(
- txn,
- table="public_room_list_stream",
- keyvalues={
- "room_id": room_id,
- "appservice_id": None,
- "network_id": None,
- },
- retcols=("stream_id", "visibility"),
- )
-
- entries.sort(key=lambda r: r["stream_id"])
-
- add_to_stream = True
- if entries:
- add_to_stream = bool(entries[-1]["visibility"]) != is_public
-
- if add_to_stream:
- self.db_pool.simple_insert_txn(
- txn,
- table="public_room_list_stream",
- values={
- "stream_id": next_id,
- "room_id": room_id,
- "visibility": is_public,
- "appservice_id": None,
- "network_id": None,
- },
- )
+ async def set_room_is_public(self, room_id: str, is_public: bool) -> None:
+ await self.db_pool.simple_update_one(
+ table="rooms",
+ keyvalues={"room_id": room_id},
+ updatevalues={"is_public": is_public},
+ desc="set_room_is_public",
+ )
- async with self._public_room_id_gen.get_next() as next_id:
- await self.db_pool.runInteraction(
- "set_room_is_public", set_room_is_public_txn, next_id
- )
self.hs.get_notifier().on_new_replication_data()
async def set_room_is_public_appservice(
@@ -1533,68 +1521,33 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
list.
"""
- def set_room_is_public_appservice_txn(txn, next_id):
- if is_public:
- try:
- self.db_pool.simple_insert_txn(
- txn,
- table="appservice_room_list",
- values={
- "appservice_id": appservice_id,
- "network_id": network_id,
- "room_id": room_id,
- },
- )
- except self.database_engine.module.IntegrityError:
- # We've already inserted, nothing to do.
- return
- else:
- self.db_pool.simple_delete_txn(
- txn,
- table="appservice_room_list",
- keyvalues={
- "appservice_id": appservice_id,
- "network_id": network_id,
- "room_id": room_id,
- },
- )
-
- entries = self.db_pool.simple_select_list_txn(
- txn,
- table="public_room_list_stream",
+ if is_public:
+ await self.db_pool.simple_upsert(
+ table="appservice_room_list",
keyvalues={
+ "appservice_id": appservice_id,
+ "network_id": network_id,
"room_id": room_id,
+ },
+ values={},
+ insertion_values={
"appservice_id": appservice_id,
"network_id": network_id,
+ "room_id": room_id,
},
- retcols=("stream_id", "visibility"),
+ desc="set_room_is_public_appservice_true",
)
-
- entries.sort(key=lambda r: r["stream_id"])
-
- add_to_stream = True
- if entries:
- add_to_stream = bool(entries[-1]["visibility"]) != is_public
-
- if add_to_stream:
- self.db_pool.simple_insert_txn(
- txn,
- table="public_room_list_stream",
- values={
- "stream_id": next_id,
- "room_id": room_id,
- "visibility": is_public,
- "appservice_id": appservice_id,
- "network_id": network_id,
- },
- )
-
- async with self._public_room_id_gen.get_next() as next_id:
- await self.db_pool.runInteraction(
- "set_room_is_public_appservice",
- set_room_is_public_appservice_txn,
- next_id,
+ else:
+ await self.db_pool.simple_delete(
+ table="appservice_room_list",
+ keyvalues={
+ "appservice_id": appservice_id,
+ "network_id": network_id,
+ "room_id": room_id,
+ },
+ desc="set_room_is_public_appservice_false",
)
+
self.hs.get_notifier().on_new_replication_data()
async def add_event_report(
@@ -1787,9 +1740,6 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
"get_event_reports_paginate", _get_event_reports_paginate_txn
)
- def get_current_public_room_stream_id(self):
- return self._public_room_id_gen.get_current_token()
-
async def block_room(self, room_id: str, user_id: str) -> None:
"""Marks the room as blocked. Can be called multiple times.
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 68f1b40ea6..c58a4b8690 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -307,7 +307,9 @@ class RoomMemberWorkerStore(EventsWorkerStore):
)
@cached()
- async def get_invited_rooms_for_local_user(self, user_id: str) -> RoomsForUser:
+ async def get_invited_rooms_for_local_user(
+ self, user_id: str
+ ) -> List[RoomsForUser]:
"""Get all the rooms the *local* user is invited to.
Args:
@@ -384,9 +386,10 @@ class RoomMemberWorkerStore(EventsWorkerStore):
)
sql = """
- SELECT room_id, e.sender, c.membership, event_id, e.stream_ordering
+ SELECT room_id, e.sender, c.membership, event_id, e.stream_ordering, r.room_version
FROM local_current_membership AS c
INNER JOIN events AS e USING (room_id, event_id)
+ INNER JOIN rooms AS r USING (room_id)
WHERE
user_id = ?
AND %s
@@ -395,7 +398,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
)
txn.execute(sql, (user_id, *args))
- results = [RoomsForUser(**r) for r in self.db_pool.cursor_to_dict(txn)]
+ results = [RoomsForUser(*r) for r in txn]
return results
@@ -445,7 +448,8 @@ class RoomMemberWorkerStore(EventsWorkerStore):
Returns:
Returns the rooms the user is in currently, along with the stream
- ordering of the most recent join for that user and room.
+ ordering of the most recent join for that user and room, along with
+ the room version of the room.
"""
return await self.db_pool.runInteraction(
"get_rooms_for_user_with_stream_ordering",
@@ -522,7 +526,9 @@ class RoomMemberWorkerStore(EventsWorkerStore):
_get_users_server_still_shares_room_with_txn,
)
- async def get_rooms_for_user(self, user_id: str, on_invalidate=None):
+ async def get_rooms_for_user(
+ self, user_id: str, on_invalidate=None
+ ) -> FrozenSet[str]:
"""Returns a set of room_ids the user is currently joined to.
If a remote user only returns rooms this server is currently
@@ -629,14 +635,12 @@ class RoomMemberWorkerStore(EventsWorkerStore):
# We don't update the event cache hit ratio as it completely throws off
# the hit ratio counts. After all, we don't populate the cache if we
# miss it here
- event_map = self._get_events_from_cache(
- member_event_ids, allow_rejected=False, update_metrics=False
- )
+ event_map = self._get_events_from_cache(member_event_ids, update_metrics=False)
missing_member_event_ids = []
for event_id in member_event_ids:
ev_entry = event_map.get(event_id)
- if ev_entry:
+ if ev_entry and not ev_entry.event.rejected_reason:
if ev_entry.event.membership == Membership.JOIN:
users_in_room[ev_entry.event.state_key] = ProfileInfo(
display_name=ev_entry.event.content.get("displayname", None),
diff --git a/synapse/storage/databases/main/session.py b/synapse/storage/databases/main/session.py
new file mode 100644
index 0000000000..172f27d109
--- /dev/null
+++ b/synapse/storage/databases/main/session.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+import synapse.util.stringutils as stringutils
+from synapse.api.errors import StoreError
+from synapse.metrics.background_process_metrics import wrap_as_background_process
+from synapse.storage._base import SQLBaseStore, db_to_json
+from synapse.storage.database import (
+ DatabasePool,
+ LoggingDatabaseConnection,
+ LoggingTransaction,
+)
+from synapse.types import JsonDict
+from synapse.util import json_encoder
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+
+class SessionStore(SQLBaseStore):
+ """
+ A store for generic session data.
+
+ Each type of session should provide a unique type (to separate sessions).
+
+ Sessions are automatically removed when they expire.
+ """
+
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: LoggingDatabaseConnection,
+ hs: "HomeServer",
+ ):
+ super().__init__(database, db_conn, hs)
+
+ # Create a background job for culling expired sessions.
+ if hs.config.run_background_tasks:
+ self._clock.looping_call(self._delete_expired_sessions, 30 * 60 * 1000)
+
+ async def create_session(
+ self, session_type: str, value: JsonDict, expiry_ms: int
+ ) -> str:
+ """
+ Creates a new session of the given type.
+
+ Args:
+ session_type: The type for this session.
+ value: The value to store.
+ expiry_ms: How long, in milliseconds, before the session expires
+ and becomes eligible for deletion.
+
+ Returns:
+ The newly created session ID.
+
+ Raises:
+ StoreError if a unique session ID cannot be generated.
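+
+ Example round trip (illustrative; the session type name is made up):
+ session_id = await store.create_session(
+ session_type="example_pagination",
+ value={"depth": 1},
+ expiry_ms=5 * 60 * 1000,
+ )
+ data = await store.get_session("example_pagination", session_id)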
+ """
+ # autogen a session ID and try to create it. We may clash, so just
+ # try a few times till one goes through, giving up eventually.
+ attempts = 0
+ while attempts < 5:
+ session_id = stringutils.random_string(24)
+
+ try:
+ await self.db_pool.simple_insert(
+ table="sessions",
+ values={
+ "session_id": session_id,
+ "session_type": session_type,
+ "value": json_encoder.encode(value),
+ "expiry_time_ms": self.hs.get_clock().time_msec() + expiry_ms,
+ },
+ desc="create_session",
+ )
+
+ return session_id
+ except self.db_pool.engine.module.IntegrityError:
+ attempts += 1
+ raise StoreError(500, "Couldn't generate a session ID.")
+
+ async def get_session(self, session_type: str, session_id: str) -> JsonDict:
+ """
+ Retrieve data stored with create_session
+
+ Args:
+ session_type: The type for this session.
+ session_id: The session ID returned from create_session.
+
+ Raises:
+ StoreError if the session cannot be found.
+ """
+
+ def _get_session(
+ txn: LoggingTransaction, session_type: str, session_id: str, ts: int
+ ) -> JsonDict:
+ # This includes the expiry time since items are only periodically
+ # deleted, not upon expiry.
+ select_sql = """
+ SELECT value FROM sessions WHERE
+ session_type = ? AND session_id = ? AND expiry_time_ms > ?
+ """
+ txn.execute(select_sql, [session_type, session_id, ts])
+ row = txn.fetchone()
+
+ if not row:
+ raise StoreError(404, "No session")
+
+ return db_to_json(row[0])
+
+ return await self.db_pool.runInteraction(
+ "get_session",
+ _get_session,
+ session_type,
+ session_id,
+ self._clock.time_msec(),
+ )
+
+ @wrap_as_background_process("delete_expired_sessions")
+ async def _delete_expired_sessions(self) -> None:
+ """Remove sessions with expiry dates that have passed."""
+
+ def _delete_expired_sessions_txn(txn: LoggingTransaction, ts: int) -> None:
+ sql = "DELETE FROM sessions WHERE expiry_time_ms <= ?"
+ txn.execute(sql, (ts,))
+
+ await self.db_pool.runInteraction(
+ "delete_expired_sessions",
+ _delete_expired_sessions_txn,
+ self._clock.time_msec(),
+ )
diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py
index 38bfdf5dad..4d6bbc94c7 100644
--- a/synapse/storage/databases/main/ui_auth.py
+++ b/synapse/storage/databases/main/ui_auth.py
@@ -15,6 +15,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union
import attr
+from synapse.api.constants import LoginType
from synapse.api.errors import StoreError
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import LoggingTransaction
@@ -329,6 +330,48 @@ class UIAuthWorkerStore(SQLBaseStore):
keyvalues={},
)
+ # If a registration token was used, decrement the pending counter
+ # before deleting the session.
+ rows = self.db_pool.simple_select_many_txn(
+ txn,
+ table="ui_auth_sessions_credentials",
+ column="session_id",
+ iterable=session_ids,
+ keyvalues={"stage_type": LoginType.REGISTRATION_TOKEN},
+ retcols=["result"],
+ )
+
+ # Get the tokens used and how much pending needs to be decremented by.
+ token_counts: Dict[str, int] = {}
+ for r in rows:
+ # If registration was successfully completed, the result of the
+ # registration token stage for that session will be True.
+ # If a token was used to authenticate, but registration was
+ # never completed, the result will be the token used.
+ token = db_to_json(r["result"])
+ if isinstance(token, str):
+ token_counts[token] = token_counts.get(token, 0) + 1
+
+ # Update the `pending` counters.
+ if len(token_counts) > 0:
+ token_rows = self.db_pool.simple_select_many_txn(
+ txn,
+ table="registration_tokens",
+ column="token",
+ iterable=list(token_counts.keys()),
+ keyvalues={},
+ retcols=["token", "pending"],
+ )
+ for token_row in token_rows:
+ token = token_row["token"]
+ new_pending = token_row["pending"] - token_counts[token]
+ self.db_pool.simple_update_one_txn(
+ txn,
+ table="registration_tokens",
+ keyvalues={"token": token},
+ updatevalues={"pending": new_pending},
+ )
+
# Delete the corresponding completed credentials.
self.db_pool.simple_delete_many_txn(
txn,
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 9d28d69ac7..65dde67ae9 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -365,7 +365,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
return False
async def update_profile_in_user_dir(
- self, user_id: str, display_name: str, avatar_url: str
+ self, user_id: str, display_name: Optional[str], avatar_url: Optional[str]
) -> None:
"""
Update or add a user's profile in the user directory.
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index c34fbf21bc..2500381b7b 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -14,25 +14,41 @@
# limitations under the License.
import logging
-from collections import namedtuple
+from typing import List, Optional, Tuple
+
+import attr
+
+from synapse.types import PersistedEventPosition
logger = logging.getLogger(__name__)
-RoomsForUser = namedtuple(
- "RoomsForUser", ("room_id", "sender", "membership", "event_id", "stream_ordering")
-)
+@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True)
+class RoomsForUser:
+ room_id: str
+ sender: str
+ membership: str
+ event_id: str
+ stream_ordering: int
+ room_version_id: str
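+
+ # Rows from the membership query unpack positionally into this class, e.g.
+ # (illustrative values):
+ # RoomsForUser("!room:example.com", "@alice:example.com", "join",
+ # "$event_id", 42, "6")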
+
+
+@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True)
+class GetRoomsForUserWithStreamOrdering:
+ room_id: str
+ event_pos: PersistedEventPosition
-GetRoomsForUserWithStreamOrdering = namedtuple(
- "GetRoomsForUserWithStreamOrdering", ("room_id", "event_pos")
-)
+@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True)
+class ProfileInfo:
+ avatar_url: Optional[str]
+ display_name: Optional[str]
-# We store this using a namedtuple so that we save about 3x space over using a
-# dict.
-ProfileInfo = namedtuple("ProfileInfo", ("avatar_url", "display_name"))
-# "members" points to a truncated list of (user_id, event_id) tuples for users of
-# a given membership type, suitable for use in calculating heroes for a room.
-# "count" points to the total numberr of users of a given membership type.
-MemberSummary = namedtuple("MemberSummary", ("members", "count"))
+@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True)
+class MemberSummary:
+ # A truncated list of (user_id, event_id) tuples for users of a given
+ # membership type, suitable for use in calculating heroes for a room.
+ members: List[Tuple[str, str]]
+ # The total number of users of a given membership type.
+ count: int
diff --git a/synapse/storage/schema/README.md b/synapse/storage/schema/README.md
index 729f44ea6c..4fc2061a3d 100644
--- a/synapse/storage/schema/README.md
+++ b/synapse/storage/schema/README.md
@@ -1,4 +1,4 @@
# Synapse Database Schemas
This directory contains the schema files used to build Synapse databases. For more
-information, see /docs/development/database_schema.md.
+information, see https://matrix-org.github.io/synapse/develop/development/database_schema.html.
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 36340a652a..af9cc69949 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -12,19 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-SCHEMA_VERSION = 61
+# When updating these values, please leave a short summary of the changes below.
+
+SCHEMA_VERSION = 63
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
shape of the database schema (even if those requirements are backwards-compatible with
older versions of Synapse).
-See `README.md <synapse/storage/schema/README.md>`_ for more information on how this
-works.
+See https://matrix-org.github.io/synapse/develop/development/database_schema.html
+for more information on how this works.
Changes in SCHEMA_VERSION = 61:
- The `user_stats_historical` and `room_stats_historical` tables are not written and
are not read (previously, they were written but not read).
+
+Changes in SCHEMA_VERSION = 63:
+ - The `public_room_list_stream` table is no longer written to or read from
+ (previously it was written to and read from, but not for any significant purpose).
+ https://github.com/matrix-org/synapse/pull/10565
"""
diff --git a/synapse/storage/schema/main/delta/62/01insertion_event_extremities.sql b/synapse/storage/schema/main/delta/62/01insertion_event_extremities.sql
new file mode 100644
index 0000000000..b731ef284a
--- /dev/null
+++ b/synapse/storage/schema/main/delta/62/01insertion_event_extremities.sql
@@ -0,0 +1,24 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- Add a table that keeps track of which "insertion" events need to be backfilled
+CREATE TABLE IF NOT EXISTS insertion_event_extremities(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS insertion_event_extremities_event_id ON insertion_event_extremities(event_id);
+CREATE INDEX IF NOT EXISTS insertion_event_extremities_room_id ON insertion_event_extremities(room_id);
diff --git a/synapse/storage/schema/main/delta/63/01create_registration_tokens.sql b/synapse/storage/schema/main/delta/63/01create_registration_tokens.sql
new file mode 100644
index 0000000000..ee6cf958f4
--- /dev/null
+++ b/synapse/storage/schema/main/delta/63/01create_registration_tokens.sql
@@ -0,0 +1,23 @@
+/* Copyright 2021 Callum Brown
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS registration_tokens(
+ token TEXT NOT NULL, -- The token that can be used for authentication.
+ uses_allowed INT, -- The total number of times this token can be used. NULL if no limit.
+ pending INT NOT NULL, -- The number of in progress registrations using this token.
+ completed INT NOT NULL, -- The number of times this token has been used to complete a registration.
+ expiry_time BIGINT, -- The latest time this token will be valid (epoch time in milliseconds). NULL if token doesn't expire.
+ UNIQUE (token)
+);
diff --git a/synapse/storage/schema/main/delta/63/02delete_unlinked_email_pushers.sql b/synapse/storage/schema/main/delta/63/02delete_unlinked_email_pushers.sql
new file mode 100644
index 0000000000..611c4b95cf
--- /dev/null
+++ b/synapse/storage/schema/main/delta/63/02delete_unlinked_email_pushers.sql
@@ -0,0 +1,20 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- We may not have deleted all pushers for emails that are no longer linked
+-- to an account, so we set up a background job to delete them.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (6302, 'remove_deleted_email_pushers', '{}');
diff --git a/synapse/storage/schema/main/delta/63/02populate-rooms-creator.sql b/synapse/storage/schema/main/delta/63/02populate-rooms-creator.sql
new file mode 100644
index 0000000000..f7c0b31261
--- /dev/null
+++ b/synapse/storage/schema/main/delta/63/02populate-rooms-creator.sql
@@ -0,0 +1,17 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (ordering, update_name, progress_json)
+ VALUES (6302, 'populate_rooms_creator_column', '{}');
diff --git a/synapse/storage/schema/main/delta/63/03session_store.sql b/synapse/storage/schema/main/delta/63/03session_store.sql
new file mode 100644
index 0000000000..535fb34c10
--- /dev/null
+++ b/synapse/storage/schema/main/delta/63/03session_store.sql
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2021 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS sessions(
+ session_type TEXT NOT NULL, -- The unique key for this type of session.
+ session_id TEXT NOT NULL, -- The session ID passed to the client.
+ value TEXT NOT NULL, -- A JSON dictionary to persist.
+ expiry_time_ms BIGINT NOT NULL, -- The time this session will expire (epoch time in milliseconds).
+ UNIQUE (session_type, session_id)
+);
diff --git a/synapse/storage/schema/main/delta/63/04add_presence_stream_not_offline_index.sql b/synapse/storage/schema/main/delta/63/04add_presence_stream_not_offline_index.sql
new file mode 100644
index 0000000000..b90856004b
--- /dev/null
+++ b/synapse/storage/schema/main/delta/63/04add_presence_stream_not_offline_index.sql
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2021 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (6304, 'presence_stream_not_offline_index', '{}');
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index c768fdea56..6f7cbe40f4 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -19,6 +19,7 @@ from contextlib import contextmanager
from typing import Dict, Iterable, List, Optional, Set, Tuple, Union
import attr
+from sortedcontainers import SortedSet
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.database import DatabasePool, LoggingTransaction
@@ -240,7 +241,7 @@ class MultiWriterIdGenerator:
# Set of local IDs that we're still processing. The current position
# should be less than the minimum of this set (if not empty).
- self._unfinished_ids: Set[int] = set()
+ self._unfinished_ids: SortedSet[int] = SortedSet()
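+ # (A SortedSet keeps the IDs ordered, so the smallest unfinished ID is
+ # `self._unfinished_ids[0]` rather than an O(n) `min()` scan.)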
# Set of local IDs that we've processed that are larger than the current
# position, due to there being smaller unpersisted IDs.
@@ -473,7 +474,7 @@ class MultiWriterIdGenerator:
finished = set()
- min_unfinshed = min(self._unfinished_ids)
+ min_unfinshed = self._unfinished_ids[0]
for s in self._finished_ids:
if s < min_unfinshed:
if new_cur is None or new_cur < s:
diff --git a/synapse/types.py b/synapse/types.py
index 429bb013d2..80fa903c4b 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -751,3 +751,32 @@ def get_verify_key_from_cross_signing_key(key_info):
# and return that one key
for key_id, key_data in keys.items():
return (key_id, decode_verify_key_bytes(key_id, decode_base64(key_data)))
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class UserInfo:
+ """Holds information about a user. Result of get_userinfo_by_id.
+
+ Attributes:
+ user_id: ID of the user.
+ appservice_id: Application service ID that created this user.
+ consent_server_notice_sent: Version of policy documents the user has been sent.
+ consent_version: Version of policy documents the user has consented to.
+ creation_ts: Creation timestamp of the user.
+ is_admin: True if the user is an admin.
+ is_deactivated: True if the user has been deactivated.
+ is_guest: True if the user is a guest user.
+ is_shadow_banned: True if the user has been shadow-banned.
+ user_type: User type (None for a normal user; 'support' and 'bot' are the other options).
+ """
+
+ user_id: UserID
+ appservice_id: Optional[int]
+ consent_server_notice_sent: Optional[str]
+ consent_version: Optional[str]
+ user_type: Optional[str]
+ creation_ts: int
+ is_admin: bool
+ is_deactivated: bool
+ is_guest: bool
+ is_shadow_banned: bool
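
Since the class is declared with `frozen=True` and `slots=True`, instances are immutable value objects. A hypothetical construction (the field values here are made up):

```python
import attr

from synapse.types import UserID, UserInfo

info = UserInfo(
    user_id=UserID.from_string("@alice:example.com"),
    appservice_id=None,
    consent_server_notice_sent=None,
    consent_version="1.0",
    user_type=None,
    creation_ts=1630000000,
    is_admin=False,
    is_deactivated=False,
    is_guest=False,
    is_shadow_banned=False,
)

# frozen=True makes mutation a hard error rather than silent state drift:
try:
    info.is_admin = True
except attr.exceptions.FrozenInstanceError:
    pass
```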
diff --git a/synapse/util/jsonobject.py b/synapse/util/jsonobject.py
deleted file mode 100644
index abc12f0837..0000000000
--- a/synapse/util/jsonobject.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class JsonEncodedObject:
- """A common base class for defining protocol units that are represented
- as JSON.
-
- Attributes:
- unrecognized_keys (dict): A dict containing all the key/value pairs we
- don't recognize.
- """
-
- valid_keys = [] # keys we will store
- """A list of strings that represent keys we know about
- and can handle. If we have values for these keys they will be
- included in the `dictionary` instance variable.
- """
-
- internal_keys = [] # keys to ignore while building dict
- """A list of strings that should *not* be encoded into JSON.
- """
-
- required_keys = []
- """A list of strings that we require to exist. If they are not given upon
- construction it raises an exception.
- """
-
- def __init__(self, **kwargs):
- """Takes the dict of `kwargs` and loads all keys that are *valid*
- (i.e., are included in the `valid_keys` list) into the dictionary`
- instance variable.
-
- Any keys that aren't recognized are added to the `unrecognized_keys`
- attribute.
-
- Args:
- **kwargs: Attributes associated with this protocol unit.
- """
- for required_key in self.required_keys:
- if required_key not in kwargs:
- raise RuntimeError("Key %s is required" % required_key)
-
- self.unrecognized_keys = {} # Keys we were given not listed as valid
- for k, v in kwargs.items():
- if k in self.valid_keys or k in self.internal_keys:
- self.__dict__[k] = v
- else:
- self.unrecognized_keys[k] = v
-
- def get_dict(self):
- """Converts this protocol unit into a :py:class:`dict`, ready to be
- encoded as JSON.
-
- The keys it encodes are: `valid_keys` - `internal_keys`
-
- Returns
- dict
- """
- d = {
- k: _encode(v)
- for (k, v) in self.__dict__.items()
- if k in self.valid_keys and k not in self.internal_keys
- }
- d.update(self.unrecognized_keys)
- return d
-
- def get_internal_dict(self):
- d = {
- k: _encode(v, internal=True)
- for (k, v) in self.__dict__.items()
- if k in self.valid_keys
- }
- d.update(self.unrecognized_keys)
- return d
-
- def __str__(self):
- return "(%s, %s)" % (self.__class__.__name__, repr(self.__dict__))
-
-
-def _encode(obj, internal=False):
- if type(obj) is list:
- return [_encode(o, internal=internal) for o in obj]
-
- if isinstance(obj, JsonEncodedObject):
- if internal:
- return obj.get_internal_dict()
- else:
- return obj.get_dict()
-
- return obj
diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py
index da24ba0470..cfb5b94ca9 100644
--- a/synapse/util/manhole.py
+++ b/synapse/util/manhole.py
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import inspect
import sys
import traceback
@@ -20,6 +21,7 @@ from twisted.conch.insults import insults
from twisted.conch.manhole import ColoredManhole, ManholeInterpreter
from twisted.conch.ssh.keys import Key
from twisted.cred import checkers, portal
+from twisted.internet import defer
PUBLIC_KEY = (
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHhGATaW4KhE23+7nrH4jFx3yLq9OjaEs5"
@@ -59,7 +61,7 @@ EddTrx3TNpr1D5m/f+6mnXWrc8u9y1+GNx9yz889xMjIBTBI9KqaaOs=
-----END RSA PRIVATE KEY-----"""
-def manhole(username, password, globals):
-    """Starts a ssh listener with password authentication using
-    the given username and password. Clients connecting to the ssh
+def manhole(settings, globals):
+    """Starts an ssh listener with password authentication, using
+    the username and password from the given settings. Clients connecting to the ssh
listener will find themselves in a colored python shell with
@@ -73,6 +75,15 @@ def manhole(username, password, globals):
Returns:
twisted.internet.protocol.Factory: A factory to pass to ``listenTCP``
"""
+ username = settings.username
+ password = settings.password
+ priv_key = settings.priv_key
+ if priv_key is None:
+ priv_key = Key.fromString(PRIVATE_KEY)
+ pub_key = settings.pub_key
+ if pub_key is None:
+ pub_key = Key.fromString(PUBLIC_KEY)
+
if not isinstance(password, bytes):
password = password.encode("ascii")
@@ -84,8 +95,8 @@ def manhole(username, password, globals):
)
factory = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker]))
- factory.publicKeys[b"ssh-rsa"] = Key.fromString(PUBLIC_KEY)
- factory.privateKeys[b"ssh-rsa"] = Key.fromString(PRIVATE_KEY)
+ factory.privateKeys[b"ssh-rsa"] = priv_key
+ factory.publicKeys[b"ssh-rsa"] = pub_key
return factory
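
With this change the caller supplies a settings object instead of bare credentials; when `priv_key`/`pub_key` are None, the hardcoded test keys above are used. A hypothetical caller (`SimpleManholeSettings` is an illustrative stand-in, not Synapse's actual config class):

```python
from typing import Optional

import attr
from twisted.conch.ssh.keys import Key
from twisted.internet import reactor

from synapse.util.manhole import manhole


@attr.s(auto_attribs=True)
class SimpleManholeSettings:
    """Stand-in for the real settings object; the attribute names are
    the ones manhole() reads above."""

    username: str = "matrix"
    password: str = "rabbithole"
    priv_key: Optional[Key] = None  # None -> fall back to the hardcoded PRIVATE_KEY
    pub_key: Optional[Key] = None   # None -> fall back to the hardcoded PUBLIC_KEY


factory = manhole(SimpleManholeSettings(), {"answer": 42})
reactor.listenTCP(9000, factory, interface="127.0.0.1")
# reactor.run() would then start serving the shell on localhost:9000.
```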
@@ -141,3 +152,15 @@ class SynapseManholeInterpreter(ManholeInterpreter):
self.write("".join(lines))
finally:
last_tb = ei = None
+
+ def displayhook(self, obj):
+ """
+ We override the displayhook so that we automatically convert coroutines
+ into Deferreds. (Our superclass' displayhook will take care of the rest,
+ by displaying the Deferred if it's ready, or registering a callback
+ if it's not).
+ """
+ if inspect.iscoroutine(obj):
+ super().displayhook(defer.ensureDeferred(obj))
+ else:
+ super().displayhook(obj)
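
The effect, reproduced outside the manhole: evaluating a coroutine normally yields an un-awaited coroutine object, so the hook wraps it exactly the way this standalone snippet does:

```python
import inspect

from twisted.internet import defer


async def whoami() -> str:
    return "@alice:example.com"


obj = whoami()                 # a coroutine object, not a result
assert inspect.iscoroutine(obj)

d = defer.ensureDeferred(obj)  # start the coroutine, wrapped in a Deferred
d.addCallback(print)           # fires with "@alice:example.com"
```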