From ba7a91aea5fd624bf048f0fda0dca80da7a1945e Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 21 Sep 2021 12:09:57 -0400
Subject: Refactor oEmbed previews (#10814)
The major change is moving the decision of whether to use oEmbed
further up the call-stack. This reverts the _download_url method to
being a "dumb" functionwhich takes a single URL and downloads it
(as it was before #7920).
This also makes several smaller refactorings:
* Renames internal variables for clarity.
* Factors out shared code between the HTML and rich oEmbed
previews.
* Fixes tests to preview an oEmbed image.
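For orientation, a minimal sketch of the resulting control flow (simplified
from preview_url_resource.py; names like calc_og_from_html and read_body are
hypothetical stand-ins, and error handling is omitted):

    # The caller now resolves the oEmbed URL up-front...
    oembed_url = self._oembed.get_oembed_url(url)  # None unless a pattern matches
    media_info = await self._download_url(oembed_url or url, user)

    # ...and branches on what came back.
    if _is_html(media_info.media_type):
        og = calc_og_from_html(media_info)  # hypothetical helper
    elif oembed_url and _is_json(media_info.media_type):
        # The body is an oEmbed JSON document; convert it to Open Graph.
        body = read_body(media_info)  # hypothetical helper
        og = self._oembed.parse_oembed_response(url, body).open_graph_result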
---
changelog.d/10814.feature | 1 +
docs/development/url_previews.md | 21 +-
synapse/rest/media/v1/oembed.py | 145 +++++++-----
synapse/rest/media/v1/preview_url_resource.py | 326 +++++++++++++++-----------
tests/rest/media/v1/test_url_preview.py | 26 +-
5 files changed, 299 insertions(+), 220 deletions(-)
create mode 100644 changelog.d/10814.feature
diff --git a/changelog.d/10814.feature b/changelog.d/10814.feature
new file mode 100644
index 0000000000..4fa95a6cc9
--- /dev/null
+++ b/changelog.d/10814.feature
@@ -0,0 +1 @@
+Improve oEmbed previews by processing the author name, photo, and video information.
diff --git a/docs/development/url_previews.md b/docs/development/url_previews.md
index bbe05e281c..aff3813609 100644
--- a/docs/development/url_previews.md
+++ b/docs/development/url_previews.md
@@ -25,16 +25,14 @@ When Synapse is asked to preview a URL it does the following:
3. Kicks off a background process to generate a preview:
1. Checks the database cache by URL and timestamp and returns the result if it
has not expired and was successful (a 2xx return code).
- 2. Checks if the URL matches an oEmbed pattern. If it does, fetch the oEmbed
- response. If this is an image, replace the URL to fetch and continue. If
- if it is HTML content, use the HTML as the document and continue.
- 3. If it doesn't match an oEmbed pattern, downloads the URL and stores it
- into a file via the media storage provider and saves the local media
- metadata.
- 5. If the media is an image:
+ 2. Checks if the URL matches an [oEmbed](https://oembed.com/) pattern. If it
+ does, update the URL to download.
+ 3. Downloads the URL and stores it into a file via the media storage provider
+ and saves the local media metadata.
+ 4. If the media is an image:
1. Generates thumbnails.
2. Generates an Open Graph response based on image properties.
- 6. If the media is HTML:
+ 5. If the media is HTML:
1. Decodes the HTML via the stored file.
2. Generates an Open Graph response from the HTML.
3. If an image exists in the Open Graph response:
@@ -42,6 +40,13 @@ When Synapse is asked to preview a URL it does the following:
provider and saves the local media metadata.
2. Generates thumbnails.
3. Updates the Open Graph response based on image properties.
+ 6. If the media is JSON and an oEmbed URL was found:
+ 1. Converts the oEmbed response to an Open Graph response.
+ 2. If a thumbnail or image is in the oEmbed response:
+ 1. Downloads the URL and stores it into a file via the media storage
+ provider and saves the local media metadata.
+ 2. Generates thumbnails.
+ 3. Updates the Open Graph response based on image properties.
7. Stores the result in the database cache.
4. Returns the result.
diff --git a/synapse/rest/media/v1/oembed.py b/synapse/rest/media/v1/oembed.py
index 2e6706dbfa..8b74e72655 100644
--- a/synapse/rest/media/v1/oembed.py
+++ b/synapse/rest/media/v1/oembed.py
@@ -12,11 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+import urllib.parse
from typing import TYPE_CHECKING, Optional
import attr
from synapse.http.client import SimpleHttpClient
+from synapse.types import JsonDict
+from synapse.util import json_decoder
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -24,18 +27,15 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-@attr.s(slots=True, auto_attribs=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class OEmbedResult:
- # Either HTML content or URL must be provided.
- html: Optional[str]
- url: Optional[str]
- title: Optional[str]
- # Number of seconds to cache the content.
- cache_age: int
-
-
-class OEmbedError(Exception):
- """An error occurred processing the oEmbed object."""
+ # The Open Graph result (converted from the oEmbed result).
+ open_graph_result: JsonDict
+ # Number of seconds to cache the content, according to the oEmbed response.
+ #
+ # This will be None if no cache-age is provided in the oEmbed response (or
+ # if the oEmbed response cannot be turned into an Open Graph response).
+ cache_age: Optional[int]
class OEmbedProvider:
@@ -81,75 +81,106 @@ class OEmbedProvider:
"""
for url_pattern, endpoint in self._oembed_patterns.items():
if url_pattern.fullmatch(url):
- return endpoint
+ # TODO Specify max height / width.
+
+ # Note that only the JSON format is supported; some endpoints want
+ # this in the URL, others want it as an argument.
+ endpoint = endpoint.replace("{format}", "json")
+
+ args = {"url": url, "format": "json"}
+ query_str = urllib.parse.urlencode(args, True)
+ return f"{endpoint}?{query_str}"
# No match.
return None
- async def get_oembed_content(self, endpoint: str, url: str) -> OEmbedResult:
+ def parse_oembed_response(self, url: str, raw_body: bytes) -> OEmbedResult:
"""
- Request content from an oEmbed endpoint.
+ Parse the oEmbed response into an Open Graph response.
Args:
- endpoint: The oEmbed API endpoint.
- url: The URL to pass to the API.
+ url: The URL which is being previewed (not the one which was
+ requested).
+ raw_body: The oEmbed response as JSON encoded as bytes.
Returns:
- An object representing the metadata returned.
-
- Raises:
- OEmbedError if fetching or parsing of the oEmbed information fails.
+ An OEmbedResult, with the parsed Open Graph response and cache age.
"""
- try:
- logger.debug("Trying to get oEmbed content for url '%s'", url)
- # Note that only the JSON format is supported, some endpoints want
- # this in the URL, others want it as an argument.
- endpoint = endpoint.replace("{format}", "json")
-
- result = await self._client.get_json(
- endpoint,
- # TODO Specify max height / width.
- args={"url": url, "format": "json"},
- )
+ try:
+ # oEmbed responses *must* be UTF-8 according to the spec.
+ oembed = json_decoder.decode(raw_body.decode("utf-8"))
# Ensure there's a version of 1.0.
- if result.get("version") != "1.0":
- raise OEmbedError("Invalid version: %s" % (result.get("version"),))
-
- oembed_type = result.get("type")
+ oembed_version = oembed["version"]
+ if oembed_version != "1.0":
+ raise RuntimeError(f"Invalid version: {oembed_version}")
# Ensure the cache age is None or an int.
- cache_age = result.get("cache_age")
+ cache_age = oembed.get("cache_age")
if cache_age:
cache_age = int(cache_age)
- oembed_result = OEmbedResult(None, None, result.get("title"), cache_age)
+ # Start building the Open Graph response from the oEmbed data.
+ open_graph_response = {"og:title": oembed.get("title")}
- # HTML content.
+ # If a thumbnail exists, use it. Note that dimensions will be calculated later.
+ if "thumbnail_url" in oembed:
+ open_graph_response["og:image"] = oembed["thumbnail_url"]
+
+ # Process each type separately.
+ oembed_type = oembed["type"]
if oembed_type == "rich":
- oembed_result.html = result.get("html")
- return oembed_result
+ calc_description_and_urls(open_graph_response, oembed["html"])
- if oembed_type == "photo":
- oembed_result.url = result.get("url")
- return oembed_result
+ elif oembed_type == "photo":
+ # If this is a photo, use the full image, not the thumbnail.
+ open_graph_response["og:image"] = oembed["url"]
- # TODO Handle link and video types.
+ else:
+ raise RuntimeError(f"Unknown oEmbed type: {oembed_type}")
- if "thumbnail_url" in result:
- oembed_result.url = result.get("thumbnail_url")
- return oembed_result
+ except Exception as e:
+ # Trap any exception and let the code follow as usual.
+ logger.warning(f"Error parsing oEmbed metadata from {url}: {e:r}")
+ open_graph_response = {}
+ cache_age = None
- raise OEmbedError("Incompatible oEmbed information.")
+ return OEmbedResult(open_graph_response, cache_age)
- except OEmbedError as e:
- # Trap OEmbedErrors first so we can directly re-raise them.
- logger.warning("Error parsing oEmbed metadata from %s: %r", url, e)
- raise
- except Exception as e:
- # Trap any exception and let the code follow as usual.
- # FIXME: pass through 404s and other error messages nicely
- logger.warning("Error downloading oEmbed metadata from %s: %r", url, e)
- raise OEmbedError() from e
+def calc_description_and_urls(open_graph_response: JsonDict, html_body: str) -> None:
+ """
+ Calculate description for an HTML document.
+
+ This uses lxml to convert the HTML document into plaintext. If errors
+ occur during processing of the document, an empty response is returned.
+
+ Args:
+ open_graph_response: The current Open Graph summary. This is updated with additional fields.
+ html_body: The HTML document, as a string.
+ """
+ # If there's no body, nothing useful is going to be found.
+ if not html_body:
+ return
+
+ from lxml import etree
+
+ # Create an HTML parser that recovers from errors where possible.
+ parser = etree.HTMLParser(recover=True, encoding="utf-8")
+
+ # Attempt to parse the body. If this fails, no description is added.
+ tree = etree.fromstring(html_body, parser)
+
+ # The data was successfully parsed, but no tree was found.
+ if tree is None:
+ return
+
+ from synapse.rest.media.v1.preview_url_resource import _calc_description
+
+ description = _calc_description(tree)
+ if description:
+ open_graph_response["og:description"] = description
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index fe0627d9b0..0a0b476d2b 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -44,7 +44,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.media.v1._base import get_filename_from_headers
from synapse.rest.media.v1.media_storage import MediaStorage
-from synapse.rest.media.v1.oembed import OEmbedError, OEmbedProvider
+from synapse.rest.media.v1.oembed import OEmbedProvider
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.async_helpers import ObservableDeferred
@@ -73,6 +73,7 @@ OG_TAG_NAME_MAXLEN = 50
OG_TAG_VALUE_MAXLEN = 1000
ONE_HOUR = 60 * 60 * 1000
+ONE_DAY = 24 * ONE_HOUR
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -255,10 +256,19 @@ class PreviewUrlResource(DirectServeJsonResource):
og = og.encode("utf8")
return og
- media_info = await self._download_url(url, user)
+ # If this URL can be accessed via oEmbed, use that instead.
+ url_to_download = url
+ oembed_url = self._oembed.get_oembed_url(url)
+ if oembed_url:
+ url_to_download = oembed_url
+
+ media_info = await self._download_url(url_to_download, user)
logger.debug("got media_info of '%s'", media_info)
+ # The number of milliseconds that the response should be considered valid.
+ expiration_ms = media_info.expires
+
if _is_media(media_info.media_type):
file_id = media_info.filesystem_id
dims = await self.media_repo._generate_thumbnails(
@@ -288,34 +298,22 @@ class PreviewUrlResource(DirectServeJsonResource):
encoding = get_html_media_encoding(body, media_info.media_type)
og = decode_and_calc_og(body, media_info.uri, encoding)
- # pre-cache the image for posterity
- # FIXME: it might be cleaner to use the same flow as the main /preview_url
- # request itself and benefit from the same caching etc. But for now we
- # just rely on the caching on the master request to speed things up.
- if "og:image" in og and og["og:image"]:
- image_info = await self._download_url(
- _rebase_url(og["og:image"], media_info.uri), user
- )
+ await self._precache_image_url(user, media_info, og)
+
+ elif oembed_url and _is_json(media_info.media_type):
+ # Handle an oEmbed response.
+ with open(media_info.filename, "rb") as file:
+ body = file.read()
+
+ oembed_response = self._oembed.parse_oembed_response(media_info.uri, body)
+ og = oembed_response.open_graph_result
+
+ # Use the cache age from the oEmbed result, instead of the HTTP response.
+ if oembed_response.cache_age is not None:
+ expiration_ms = oembed_response.cache_age
+
+ await self._precache_image_url(user, media_info, og)
- if _is_media(image_info.media_type):
- # TODO: make sure we don't choke on white-on-transparent images
- file_id = image_info.filesystem_id
- dims = await self.media_repo._generate_thumbnails(
- None, file_id, file_id, image_info.media_type, url_cache=True
- )
- if dims:
- og["og:image:width"] = dims["width"]
- og["og:image:height"] = dims["height"]
- else:
- logger.warning("Couldn't get dims for %s", og["og:image"])
-
- og[
- "og:image"
- ] = f"mxc://{self.server_name}/{image_info.filesystem_id}"
- og["og:image:type"] = image_info.media_type
- og["matrix:image:size"] = image_info.media_length
- else:
- del og["og:image"]
else:
logger.warning("Failed to find any OG data in %s", url)
og = {}
@@ -336,12 +334,15 @@ class PreviewUrlResource(DirectServeJsonResource):
jsonog = json_encoder.encode(og)
+ # Cap the amount of time to consider a response valid.
+ expiration_ms = min(expiration_ms, ONE_DAY)
+
# store OG in history-aware DB cache
await self.store.store_url_cache(
url,
media_info.response_code,
media_info.etag,
- media_info.expires + media_info.created_ts_ms,
+ media_info.created_ts_ms + expiration_ms,
jsonog,
media_info.filesystem_id,
media_info.created_ts_ms,
@@ -358,88 +359,52 @@ class PreviewUrlResource(DirectServeJsonResource):
file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True)
- # If this URL can be accessed via oEmbed, use that instead.
- url_to_download: Optional[str] = url
- oembed_url = self._oembed.get_oembed_url(url)
- if oembed_url:
- # The result might be a new URL to download, or it might be HTML content.
+ with self.media_storage.store_into_file(file_info) as (f, fname, finish):
try:
- oembed_result = await self._oembed.get_oembed_content(oembed_url, url)
- if oembed_result.url:
- url_to_download = oembed_result.url
- elif oembed_result.html:
- url_to_download = None
- except OEmbedError:
- # If an error occurs, try doing a normal preview.
- pass
+ logger.debug("Trying to get preview for url '%s'", url)
+ length, headers, uri, code = await self.client.get_file(
+ url,
+ output_stream=f,
+ max_size=self.max_spider_size,
+ headers={"Accept-Language": self.url_preview_accept_language},
+ )
+ except SynapseError:
+ # Pass SynapseErrors through directly, so that the servlet
+ # handler will return a SynapseError to the client instead of
+ # blank data or a 500.
+ raise
+ except DNSLookupError:
+ # DNS lookup returned no results
+ # Note: This will also be the case if one of the resolved IP
+ # addresses is blacklisted
+ raise SynapseError(
+ 502,
+ "DNS resolution failure during URL preview generation",
+ Codes.UNKNOWN,
+ )
+ except Exception as e:
+ # FIXME: pass through 404s and other error messages nicely
+ logger.warning("Error downloading %s: %r", url, e)
- if url_to_download:
- with self.media_storage.store_into_file(file_info) as (f, fname, finish):
- try:
- logger.debug("Trying to get preview for url '%s'", url_to_download)
- length, headers, uri, code = await self.client.get_file(
- url_to_download,
- output_stream=f,
- max_size=self.max_spider_size,
- headers={"Accept-Language": self.url_preview_accept_language},
- )
- except SynapseError:
- # Pass SynapseErrors through directly, so that the servlet
- # handler will return a SynapseError to the client instead of
- # blank data or a 500.
- raise
- except DNSLookupError:
- # DNS lookup returned no results
- # Note: This will also be the case if one of the resolved IP
- # addresses is blacklisted
- raise SynapseError(
- 502,
- "DNS resolution failure during URL preview generation",
- Codes.UNKNOWN,
- )
- except Exception as e:
- # FIXME: pass through 404s and other error messages nicely
- logger.warning("Error downloading %s: %r", url_to_download, e)
-
- raise SynapseError(
- 500,
- "Failed to download content: %s"
- % (traceback.format_exception_only(sys.exc_info()[0], e),),
- Codes.UNKNOWN,
- )
- await finish()
-
- if b"Content-Type" in headers:
- media_type = headers[b"Content-Type"][0].decode("ascii")
- else:
- media_type = "application/octet-stream"
+ raise SynapseError(
+ 500,
+ "Failed to download content: %s"
+ % (traceback.format_exception_only(sys.exc_info()[0], e),),
+ Codes.UNKNOWN,
+ )
+ await finish()
- download_name = get_filename_from_headers(headers)
+ if b"Content-Type" in headers:
+ media_type = headers[b"Content-Type"][0].decode("ascii")
+ else:
+ media_type = "application/octet-stream"
- # FIXME: we should calculate a proper expiration based on the
- # Cache-Control and Expire headers. But for now, assume 1 hour.
- expires = ONE_HOUR
- etag = (
- headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None
- )
- else:
- # we can only get here if we did an oembed request and have an oembed_result.html
- assert oembed_result.html is not None
- assert oembed_url is not None
-
- html_bytes = oembed_result.html.encode("utf-8")
- with self.media_storage.store_into_file(file_info) as (f, fname, finish):
- f.write(html_bytes)
- await finish()
-
- media_type = "text/html"
- download_name = oembed_result.title
- length = len(html_bytes)
- # If a specific cache age was not given, assume 1 hour.
- expires = oembed_result.cache_age or ONE_HOUR
- uri = oembed_url
- code = 200
- etag = None
+ download_name = get_filename_from_headers(headers)
+
+ # FIXME: we should calculate a proper expiration based on the
+ # Cache-Control and Expire headers. But for now, assume 1 hour.
+ expires = ONE_HOUR
+ etag = headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None
try:
time_now_ms = self.clock.time_msec()
@@ -474,6 +439,46 @@ class PreviewUrlResource(DirectServeJsonResource):
etag=etag,
)
+ async def _precache_image_url(
+ self, user: str, media_info: MediaInfo, og: JsonDict
+ ) -> None:
+ """
+ Pre-cache the image (if one exists) for posterity
+
+ Args:
+ user: The user requesting the preview.
+ media_info: The media being previewed.
+ og: The Open Graph dictionary. This is modified with image information.
+ """
+ # If there's no image or it is blank, there's nothing to do.
+ if "og:image" not in og or not og["og:image"]:
+ return
+
+ # FIXME: it might be cleaner to use the same flow as the main /preview_url
+ # request itself and benefit from the same caching etc. But for now we
+ # just rely on the caching on the master request to speed things up.
+ image_info = await self._download_url(
+ _rebase_url(og["og:image"], media_info.uri), user
+ )
+
+ if _is_media(image_info.media_type):
+ # TODO: make sure we don't choke on white-on-transparent images
+ file_id = image_info.filesystem_id
+ dims = await self.media_repo._generate_thumbnails(
+ None, file_id, file_id, image_info.media_type, url_cache=True
+ )
+ if dims:
+ og["og:image:width"] = dims["width"]
+ og["og:image:height"] = dims["height"]
+ else:
+ logger.warning("Couldn't get dims for %s", og["og:image"])
+
+ og["og:image"] = f"mxc://{self.server_name}/{image_info.filesystem_id}"
+ og["og:image:type"] = image_info.media_type
+ og["matrix:image:size"] = image_info.media_length
+ else:
+ del og["og:image"]
+
def _start_expire_url_cache_data(self) -> Deferred:
return run_as_background_process(
"expire_url_cache_data", self._expire_url_cache_data
@@ -527,7 +532,7 @@ class PreviewUrlResource(DirectServeJsonResource):
# These may be cached for a bit on the client (i.e., they
# may have a room open with a preview url thing open).
# So we wait a couple of days before deleting, just in case.
- expire_before = now - 2 * 24 * ONE_HOUR
+ expire_before = now - 2 * ONE_DAY
media_ids = await self.store.get_url_cache_media_before(expire_before)
removed_media = []
@@ -669,7 +674,18 @@ def decode_and_calc_og(
def _calc_og(tree: "etree.Element", media_uri: str) -> Dict[str, Optional[str]]:
- # suck our tree into lxml and define our OG response.
+ """
+ Calculate metadata for an HTML document.
+
+ This uses lxml to search the HTML document for Open Graph data.
+
+ Args:
+ tree: The parsed HTML document.
+ media_uri: The URI used to download the body.
+
+ Returns:
+ The Open Graph response as a dictionary.
+ """
# if we see any image URLs in the OG response, then spider them
# (although the client could choose to do this by asking for previews of those
@@ -743,35 +759,7 @@ def _calc_og(tree: "etree.Element", media_uri: str) -> Dict[str, Optional[str]]:
if meta_description:
og["og:description"] = meta_description[0]
else:
- # grab any text nodes which are inside the <body/> tag...
- # unless they are within an HTML5 semantic markup tag...
- # <header/>, <nav/>, <aside/>, <footer/>
- # ...or if they are within a <script/> or <style/> tag.
- # This is a very very very coarse approximation to a plain text
- # render of the page.
-
- # We don't just use XPATH here as that is slow on some machines.
-
- from lxml import etree
-
- TAGS_TO_REMOVE = (
- "header",
- "nav",
- "aside",
- "footer",
- "script",
- "noscript",
- "style",
- etree.Comment,
- )
-
- # Split all the text nodes into paragraphs (by splitting on new
- # lines)
- text_nodes = (
- re.sub(r"\s+", "\n", el).strip()
- for el in _iterate_over_text(tree.find("body"), *TAGS_TO_REMOVE)
- )
- og["og:description"] = summarize_paragraphs(text_nodes)
+ og["og:description"] = _calc_description(tree)
elif og["og:description"]:
# This must be a non-empty string at this point.
assert isinstance(og["og:description"], str)
@@ -782,6 +770,46 @@ def _calc_og(tree: "etree.Element", media_uri: str) -> Dict[str, Optional[str]]:
return og
+def _calc_description(tree: "etree.Element") -> Optional[str]:
+ """
+ Calculate a text description based on an HTML document.
+
+ Grabs any text nodes which are inside the <body/> tag, unless they are within
+ an HTML5 semantic markup tag (<header/>, <nav/>, <aside/>, <footer/>), or
+ if they are within a <script/> or <style/> tag.
+
+ This is a very very very coarse approximation to a plain text render of the page.
+
+ Args:
+ tree: The parsed HTML document.
+
+ Returns:
+ The plain text description, or None if one cannot be generated.
+ """
+ # We don't just use XPATH here as that is slow on some machines.
+
+ from lxml import etree
+
+ TAGS_TO_REMOVE = (
+ "header",
+ "nav",
+ "aside",
+ "footer",
+ "script",
+ "noscript",
+ "style",
+ etree.Comment,
+ )
+
+ # Split all the text nodes into paragraphs (by splitting on new
+ # lines)
+ text_nodes = (
+ re.sub(r"\s+", "\n", el).strip()
+ for el in _iterate_over_text(tree.find("body"), *TAGS_TO_REMOVE)
+ )
+ return summarize_paragraphs(text_nodes)
+
+
def _iterate_over_text(
tree: "etree.Element", *tags_to_ignore: Iterable[Union[str, "etree.Comment"]]
) -> Generator[str, None, None]:
@@ -841,11 +869,25 @@ def _is_html(content_type: str) -> bool:
)
+def _is_json(content_type: str) -> bool:
+ return content_type.lower().startswith("application/json")
+
+
def summarize_paragraphs(
text_nodes: Iterable[str], min_size: int = 200, max_size: int = 500
) -> Optional[str]:
- # Try to get a summary of between 200 and 500 words, respecting
- # first paragraph and then word boundaries.
+ """
+ Try to get a summary respecting first paragraph and then word boundaries.
+
+ Args:
+ text_nodes: The paragraphs to summarize.
+ min_size: The minimum number of characters to include.
+ max_size: The maximum number of characters to include.
+
+ Returns:
+ A summary of the text nodes, or None if that was not possible.
+ """
+
# TODO: Respect sentences?
description = ""
@@ -868,7 +910,7 @@ def summarize_paragraphs(
new_desc = ""
# This splits the paragraph into words, but keeping the
- # (preceeding) whitespace intact so we can easily concat
+ # (preceding) whitespace intact so we can easily concat
# words back together.
for match in re.finditer(r"\s*\S+", description):
word = match.group()
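As a quick illustration of the behaviour the new docstring describes (a
hypothetical call, not a test from this patch):

    # Paragraphs are concatenated until the summary reaches min_size; a result
    # longer than max_size is cut at a word boundary and suffixed with "…".
    summary = summarize_paragraphs(
        ["First paragraph.", "A second, much longer paragraph. " * 30]
    )
    # -> "First paragraph.\n\nA second, much longer paragraph. ..." truncated
    #    near max_size characters, ending with the ellipsis.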
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index 9f6fbfe6de..9d13899584 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -24,6 +24,7 @@ from synapse.config.oembed import OEmbedEndpointConfig
from tests import unittest
from tests.server import FakeTransport
+from tests.test_utils import SMALL_PNG
try:
import lxml
@@ -576,13 +577,6 @@ class URLPreviewTests(unittest.HomeserverTestCase):
}
oembed_content = json.dumps(result).encode("utf-8")
- end_content = (
- b"<html><head>"
- b"<title>Some Title</title>"
- b'<meta property="og:description" content="hi" />'
- b"</head></html>"
- )
-
channel = self.make_request(
"GET",
"preview_url?url=http://twitter.com/matrixdotorg/status/12345",
@@ -606,6 +600,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
self.pump()
+ # Ensure a second request is made to the photo URL.
client = self.reactor.tcpClients[1][2].buildProtocol(None)
server = AccumulatingProtocol()
server.makeConnection(FakeTransport(client, self.reactor))
@@ -613,18 +608,23 @@ class URLPreviewTests(unittest.HomeserverTestCase):
client.dataReceived(
(
b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
- b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+ b"Content-Type: image/png\r\n\r\n"
)
- % (len(end_content),)
- + end_content
+ % (len(SMALL_PNG),)
+ + SMALL_PNG
)
self.pump()
+ # Ensure the URL is what was requested.
+ self.assertIn(b"/matrixdotorg", server.data)
+
self.assertEqual(channel.code, 200)
- self.assertEqual(
- channel.json_body, {"og:title": "Some Title", "og:description": "hi"}
- )
+ self.assertIsNone(channel.json_body["og:title"])
+ self.assertTrue(channel.json_body["og:image"].startswith("mxc://"))
+ self.assertEqual(channel.json_body["og:image:height"], 1)
+ self.assertEqual(channel.json_body["og:image:width"], 1)
+ self.assertEqual(channel.json_body["og:image:type"], "image/png")
def test_oembed_rich(self):
"""Test an oEmbed endpoint which returns HTML content via the 'rich' type."""
--
cgit 1.5.1
From ebd8baf61ff8e00f8de3b63c00531765672000c8 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 21 Sep 2021 12:32:46 -0400
Subject: Clear our destination directories before copying files to GitHub
pages. (#10869)
This should fix deleted files still being accessible in the published pages.
---
.github/workflows/docs.yaml | 1 -
changelog.d/10869.doc | 1 +
2 files changed, 1 insertion(+), 1 deletion(-)
create mode 100644 changelog.d/10869.doc
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index 808f825331..2bf32e376b 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -61,6 +61,5 @@ jobs:
uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- keep_files: true
publish_dir: ./book
destination_dir: ./${{ steps.vars.outputs.branch-version }}
diff --git a/changelog.d/10869.doc b/changelog.d/10869.doc
new file mode 100644
index 0000000000..c117386072
--- /dev/null
+++ b/changelog.d/10869.doc
@@ -0,0 +1 @@
+Properly remove deleted files from GitHub pages when generating the documentation.
--
cgit 1.5.1
From b25a494779e7c86181c4b61f9bbb56c63ba529ed Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 21 Sep 2021 17:41:27 +0100
Subject: Add types to http.site (#10867)
---
changelog.d/10867.misc | 1 +
synapse/http/site.py | 40 ++++++++++++++++++++++------------------
2 files changed, 23 insertions(+), 18 deletions(-)
create mode 100644 changelog.d/10867.misc
diff --git a/changelog.d/10867.misc b/changelog.d/10867.misc
new file mode 100644
index 0000000000..01e51fbc6e
--- /dev/null
+++ b/changelog.d/10867.misc
@@ -0,0 +1 @@
+Add type hints to `synapse.http.site`.
diff --git a/synapse/http/site.py b/synapse/http/site.py
index c665a9d5db..dd4c749e16 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -21,7 +21,7 @@ from zope.interface import implementer
from twisted.internet.interfaces import IAddress, IReactorTime
from twisted.python.failure import Failure
-from twisted.web.resource import IResource
+from twisted.web.resource import IResource, Resource
from twisted.web.server import Request, Site
from synapse.config.server import ListenerConfig
@@ -61,7 +61,7 @@ class SynapseRequest(Request):
logcontext: the log context for this request
"""
- def __init__(self, channel, *args, max_request_body_size=1024, **kw):
+ def __init__(self, channel, *args, max_request_body_size: int = 1024, **kw):
Request.__init__(self, channel, *args, **kw)
self._max_request_body_size = max_request_body_size
self.site: SynapseSite = channel.site
@@ -83,13 +83,13 @@ class SynapseRequest(Request):
self._is_processing = False
# the time when the asynchronous request handler completed its processing
- self._processing_finished_time = None
+ self._processing_finished_time: Optional[float] = None
# what time we finished sending the response to the client (or the connection
# dropped)
- self.finish_time = None
+ self.finish_time: Optional[float] = None
- def __repr__(self):
+ def __repr__(self) -> str:
# We overwrite this so that we don't log ``access_token``
return "<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>" % (
self.__class__.__name__,
@@ -100,7 +100,7 @@ class SynapseRequest(Request):
self.site.site_tag,
)
- def handleContentChunk(self, data):
+ def handleContentChunk(self, data: bytes) -> None:
# we should have a `content` by now.
assert self.content, "handleContentChunk() called before gotLength()"
if self.content.tell() + len(data) > self._max_request_body_size:
@@ -139,7 +139,7 @@ class SynapseRequest(Request):
# If there's no authenticated entity, it was the requester.
self.logcontext.request.authenticated_entity = authenticated_entity or requester
- def get_request_id(self):
+ def get_request_id(self) -> str:
return "%s-%i" % (self.get_method(), self.request_seq)
def get_redacted_uri(self) -> str:
@@ -205,7 +205,7 @@ class SynapseRequest(Request):
return None, None
- def render(self, resrc):
+ def render(self, resrc: Resource) -> None:
# this is called once a Resource has been found to serve the request; in our
# case the Resource in question will normally be a JsonResource.
@@ -282,7 +282,7 @@ class SynapseRequest(Request):
if self.finish_time is not None:
self._finished_processing()
- def finish(self):
+ def finish(self) -> None:
"""Called when all response data has been written to this Request.
Overrides twisted.web.server.Request.finish to record the finish time and do
@@ -295,7 +295,7 @@ class SynapseRequest(Request):
with PreserveLoggingContext(self.logcontext):
self._finished_processing()
- def connectionLost(self, reason):
+ def connectionLost(self, reason: Union[Failure, Exception]) -> None:
"""Called when the client connection is closed before the response is written.
Overrides twisted.web.server.Request.connectionLost to record the finish time and
@@ -327,7 +327,7 @@ class SynapseRequest(Request):
if not self._is_processing:
self._finished_processing()
- def _started_processing(self, servlet_name):
+ def _started_processing(self, servlet_name: str) -> None:
"""Record the fact that we are processing this request.
This will log the request's arrival. Once the request completes,
@@ -354,9 +354,11 @@ class SynapseRequest(Request):
self.get_redacted_uri(),
)
- def _finished_processing(self):
+ def _finished_processing(self) -> None:
"""Log the completion of this request and update the metrics"""
assert self.logcontext is not None
+ assert self.finish_time is not None
+
usage = self.logcontext.get_resource_usage()
if self._processing_finished_time is None:
@@ -437,7 +439,7 @@ class XForwardedForRequest(SynapseRequest):
_forwarded_for: "Optional[_XForwardedForAddress]" = None
_forwarded_https: bool = False
- def requestReceived(self, command, path, version):
+ def requestReceived(self, command: bytes, path: bytes, version: bytes) -> None:
# this method is called by the Channel once the full request has been
# received, to dispatch the request to a resource.
# We can use it to set the IP address and protocol according to the
@@ -445,7 +447,7 @@ class XForwardedForRequest(SynapseRequest):
self._process_forwarded_headers()
return super().requestReceived(command, path, version)
- def _process_forwarded_headers(self):
+ def _process_forwarded_headers(self) -> None:
headers = self.requestHeaders.getRawHeaders(b"x-forwarded-for")
if not headers:
return
@@ -470,7 +472,7 @@ class XForwardedForRequest(SynapseRequest):
)
self._forwarded_https = True
- def isSecure(self):
+ def isSecure(self) -> bool:
if self._forwarded_https:
return True
return super().isSecure()
@@ -545,14 +547,16 @@ class SynapseSite(Site):
proxied = config.http_options.x_forwarded
request_class = XForwardedForRequest if proxied else SynapseRequest
- def request_factory(channel, queued) -> Request:
+ def request_factory(channel, queued: bool) -> Request:
return request_class(
- channel, max_request_body_size=max_request_body_size, queued=queued
+ channel,
+ max_request_body_size=max_request_body_size,
+ queued=queued,
)
self.requestFactory = request_factory # type: ignore
self.access_logger = logging.getLogger(logger_name)
self.server_version_string = server_version_string.encode("ascii")
- def log(self, request):
+ def log(self, request: SynapseRequest) -> None:
pass
--
cgit 1.5.1
From 4054dfa409fa17b45ab8f265813994956ed97bae Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 21 Sep 2021 13:34:26 -0400
Subject: Add type hints for event streams. (#10856)
---
changelog.d/10856.misc | 1 +
synapse/handlers/account_data.py | 13 ++++++--
synapse/handlers/appservice.py | 6 ++--
synapse/handlers/initial_sync.py | 2 +-
synapse/handlers/presence.py | 8 +++--
synapse/handlers/receipts.py | 13 ++++++--
synapse/handlers/room.py | 18 ++++++++---
synapse/handlers/sync.py | 6 ++--
synapse/handlers/typing.py | 13 ++++++--
synapse/module_api/__init__.py | 2 +-
synapse/notifier.py | 2 +-
synapse/storage/databases/main/receipts.py | 6 ++--
synapse/streams/__init__.py | 22 ++++++++++++++
synapse/streams/events.py | 49 ++++++++++++++++++------------
tests/handlers/test_receipts.py | 2 +-
tests/handlers/test_typing.py | 46 +++++++++++++++++++++++-----
tests/rest/client/test_shadow_banned.py | 10 ++++--
tests/rest/client/test_typing.py | 10 ++++--
18 files changed, 169 insertions(+), 60 deletions(-)
create mode 100644 changelog.d/10856.misc
diff --git a/changelog.d/10856.misc b/changelog.d/10856.misc
new file mode 100644
index 0000000000..f09af2e00a
--- /dev/null
+++ b/changelog.d/10856.misc
@@ -0,0 +1 @@
+Add missing type hints to handlers.
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index e9e7a78546..96273e2f81 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import random
-from typing import TYPE_CHECKING, Any, List, Tuple
+from typing import TYPE_CHECKING, Collection, List, Optional, Tuple
from synapse.replication.http.account_data import (
ReplicationAddTagRestServlet,
@@ -21,6 +21,7 @@ from synapse.replication.http.account_data import (
ReplicationRoomAccountDataRestServlet,
ReplicationUserAccountDataRestServlet,
)
+from synapse.streams import EventSource
from synapse.types import JsonDict, UserID
if TYPE_CHECKING:
@@ -163,7 +164,7 @@ class AccountDataHandler:
return response["max_stream_id"]
-class AccountDataEventSource:
+class AccountDataEventSource(EventSource[int, JsonDict]):
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
@@ -171,7 +172,13 @@ class AccountDataEventSource:
return self.store.get_max_account_data_stream_id()
async def get_new_events(
- self, user: UserID, from_key: int, **kwargs: Any
+ self,
+ user: UserID,
+ from_key: int,
+ limit: Optional[int],
+ room_ids: Collection[str],
+ is_guest: bool,
+ explicit_room_id: Optional[str] = None,
) -> Tuple[List[JsonDict], int]:
user_id = user.to_string()
last_stream_id = from_key
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 8bde9ed66f..b7213b67a5 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -254,7 +254,7 @@ class ApplicationServicesHandler:
async def _handle_typing(
self, service: ApplicationService, new_token: int
) -> List[JsonDict]:
- typing_source = self.event_sources.sources["typing"]
+ typing_source = self.event_sources.sources.typing
# Get the typing events from just before current
typing, _ = await typing_source.get_new_events_as(
service=service,
@@ -269,7 +269,7 @@ class ApplicationServicesHandler:
from_key = await self.store.get_type_stream_id_for_appservice(
service, "read_receipt"
)
- receipts_source = self.event_sources.sources["receipt"]
+ receipts_source = self.event_sources.sources.receipt
receipts, _ = await receipts_source.get_new_events_as(
service=service, from_key=from_key
)
@@ -279,7 +279,7 @@ class ApplicationServicesHandler:
self, service: ApplicationService, users: Collection[Union[str, UserID]]
) -> List[JsonDict]:
events: List[JsonDict] = []
- presence_source = self.event_sources.sources["presence"]
+ presence_source = self.event_sources.sources.presence
from_key = await self.store.get_type_stream_id_for_appservice(
service, "presence"
)
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index c942086e74..9ad39a65d8 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -125,7 +125,7 @@ class InitialSyncHandler(BaseHandler):
now_token = self.hs.get_event_sources().get_current_token()
- presence_stream = self.hs.get_event_sources().sources["presence"]
+ presence_stream = self.hs.get_event_sources().sources.presence
presence, _ = await presence_stream.get_new_events(
user, from_key=None, include_offline=False
)
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 841c8815b0..983c837c66 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -65,6 +65,7 @@ from synapse.replication.http.streams import ReplicationGetStreamUpdates
from synapse.replication.tcp.commands import ClearUserSyncsCommand
from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream
from synapse.storage.databases.main import DataStore
+from synapse.streams import EventSource
from synapse.types import JsonDict, UserID, get_domain_from_id
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.descriptors import _CacheContext, cached
@@ -1500,7 +1501,7 @@ def format_user_presence_state(
return content
-class PresenceEventSource:
+class PresenceEventSource(EventSource[int, UserPresenceState]):
def __init__(self, hs: "HomeServer"):
# We can't call get_presence_handler here because there's a cycle:
#
@@ -1519,10 +1520,11 @@ class PresenceEventSource:
self,
user: UserID,
from_key: Optional[int],
+ limit: Optional[int] = None,
room_ids: Optional[List[str]] = None,
- include_offline: bool = True,
+ is_guest: bool = False,
explicit_room_id: Optional[str] = None,
- **kwargs: Any,
+ include_offline: bool = True,
) -> Tuple[List[UserPresenceState], int]:
# The process for getting presence events are:
# 1. Get the rooms the user is in.
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index c7567ac05f..5881f09ebd 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -12,11 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Any, List, Optional, Tuple
+from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
from synapse.api.constants import ReadReceiptEventFields
from synapse.appservice import ApplicationService
from synapse.handlers._base import BaseHandler
+from synapse.streams import EventSource
from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id
if TYPE_CHECKING:
@@ -162,7 +163,7 @@ class ReceiptsHandler(BaseHandler):
await self.federation_sender.send_read_receipt(receipt)
-class ReceiptEventSource:
+class ReceiptEventSource(EventSource[int, JsonDict]):
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
self.config = hs.config
@@ -216,7 +217,13 @@ class ReceiptEventSource:
return visible_events
async def get_new_events(
- self, from_key: int, room_ids: List[str], user: UserID, **kwargs: Any
+ self,
+ user: UserID,
+ from_key: int,
+ limit: Optional[int],
+ room_ids: Iterable[str],
+ is_guest: bool,
+ explicit_room_id: Optional[str] = None,
) -> Tuple[List[JsonDict], int]:
from_key = int(from_key)
to_key = self.get_current_key()
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index abdd506164..287ea2fd06 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -20,7 +20,16 @@ import math
import random
import string
from collections import OrderedDict
-from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, Tuple
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Awaitable,
+ Collection,
+ Dict,
+ List,
+ Optional,
+ Tuple,
+)
from synapse.api.constants import (
EventContentFields,
@@ -47,6 +56,7 @@ from synapse.events import EventBase
from synapse.events.utils import copy_power_levels_contents
from synapse.rest.admin._base import assert_user_is_admin
from synapse.storage.state import StateFilter
+from synapse.streams import EventSource
from synapse.types import (
JsonDict,
MutableStateMap,
@@ -1173,7 +1183,7 @@ class RoomContextHandler:
return results
-class RoomEventSource:
+class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
@@ -1181,8 +1191,8 @@ class RoomEventSource:
self,
user: UserID,
from_key: RoomStreamToken,
- limit: int,
- room_ids: List[str],
+ limit: Optional[int],
+ room_ids: Collection[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
) -> Tuple[List[EventBase], RoomStreamToken]:
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index e93db4bdcc..2c7c6d63a9 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -443,7 +443,7 @@ class SyncHandler:
room_ids = sync_result_builder.joined_room_ids
- typing_source = self.event_sources.sources["typing"]
+ typing_source = self.event_sources.sources.typing
typing, typing_key = await typing_source.get_new_events(
user=sync_config.user,
from_key=typing_key,
@@ -465,7 +465,7 @@ class SyncHandler:
receipt_key = since_token.receipt_key if since_token else 0
- receipt_source = self.event_sources.sources["receipt"]
+ receipt_source = self.event_sources.sources.receipt
receipts, receipt_key = await receipt_source.get_new_events(
user=sync_config.user,
from_key=receipt_key,
@@ -1415,7 +1415,7 @@ class SyncHandler:
sync_config = sync_result_builder.sync_config
user = sync_result_builder.sync_config.user
- presence_source = self.event_sources.sources["presence"]
+ presence_source = self.event_sources.sources.presence
since_token = sync_result_builder.since_token
presence_key = None
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 4492c8567b..9326330c90 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -14,7 +14,7 @@
import logging
import random
from collections import namedtuple
-from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple
from synapse.api.errors import AuthError, ShadowBanError, SynapseError
from synapse.appservice import ApplicationService
@@ -23,6 +23,7 @@ from synapse.metrics.background_process_metrics import (
wrap_as_background_process,
)
from synapse.replication.tcp.streams import TypingStream
+from synapse.streams import EventSource
from synapse.types import JsonDict, Requester, UserID, get_domain_from_id
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.metrics import Measure
@@ -439,7 +440,7 @@ class TypingWriterHandler(FollowerTypingHandler):
raise Exception("Typing writer instance got typing info over replication")
-class TypingNotificationEventSource:
+class TypingNotificationEventSource(EventSource[int, JsonDict]):
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.clock = hs.get_clock()
@@ -485,7 +486,13 @@ class TypingNotificationEventSource:
return (events, handler._latest_room_serial)
async def get_new_events(
- self, from_key: int, room_ids: Iterable[str], **kwargs: Any
+ self,
+ user: UserID,
+ from_key: int,
+ limit: Optional[int],
+ room_ids: Iterable[str],
+ is_guest: bool,
+ explicit_room_id: Optional[str] = None,
) -> Tuple[List[JsonDict], int]:
with Measure(self.clock, "typing.get_new_events"):
from_key = int(from_key)
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 2d403532fa..3196c2bec6 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -91,7 +91,7 @@ class ModuleApi:
self._auth = hs.get_auth()
self._auth_handler = auth_handler
self._server_name = hs.hostname
- self._presence_stream = hs.get_event_sources().sources["presence"]
+ self._presence_stream = hs.get_event_sources().sources.presence
self._state = hs.get_state_handler()
self._clock: Clock = hs.get_clock()
self._send_email_handler = hs.get_send_email_handler()
diff --git a/synapse/notifier.py b/synapse/notifier.py
index bbe337949a..1a9f84ba45 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -584,7 +584,7 @@ class Notifier:
events: List[EventBase] = []
end_token = from_token
- for name, source in self.event_sources.sources.items():
+ for name, source in self.event_sources.sources.get_sources():
keyname = "%s_key" % name
before_id = getattr(before_token, keyname)
after_id = getattr(after_token, keyname)
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index edeaacd7a6..01a4281301 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -14,7 +14,7 @@
# limitations under the License.
import logging
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Dict, Iterable, List, Optional, Tuple
from twisted.internet import defer
@@ -153,12 +153,12 @@ class ReceiptsWorkerStore(SQLBaseStore):
}
async def get_linearized_receipts_for_rooms(
- self, room_ids: List[str], to_key: int, from_key: Optional[int] = None
+ self, room_ids: Iterable[str], to_key: int, from_key: Optional[int] = None
) -> List[dict]:
"""Get receipts for multiple rooms for sending to clients.
Args:
- room_id: List of room_ids.
+ room_ids: The room IDs to fetch receipts of.
to_key: Max stream id to fetch receipts up to.
from_key: Min stream id to fetch receipts from. None fetches
from the start.
diff --git a/synapse/streams/__init__.py b/synapse/streams/__init__.py
index 5e83dba2ed..806b671305 100644
--- a/synapse/streams/__init__.py
+++ b/synapse/streams/__init__.py
@@ -11,3 +11,25 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
+from typing import Collection, Generic, List, Optional, Tuple, TypeVar
+
+from synapse.types import UserID
+
+# The stream key type: either a stream token or an int.
+K = TypeVar("K")
+# The return type.
+R = TypeVar("R")
+
+
+class EventSource(Generic[K, R]):
+ async def get_new_events(
+ self,
+ user: UserID,
+ from_key: K,
+ limit: Optional[int],
+ room_ids: Collection[str],
+ is_guest: bool,
+ explicit_room_id: Optional[str] = None,
+ ) -> Tuple[List[R], K]:
+ ...
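A minimal sketch (not part of this patch) of what a concrete source now has
to provide, mirroring the int keys and JSON events of, say,
TypingNotificationEventSource:

    from typing import Collection, List, Optional, Tuple

    from synapse.streams import EventSource
    from synapse.types import JsonDict, UserID

    class DemoEventSource(EventSource[int, JsonDict]):
        """Hypothetical source keyed by an int stream position."""

        async def get_new_events(
            self,
            user: UserID,
            from_key: int,
            limit: Optional[int],
            room_ids: Collection[str],
            is_guest: bool,
            explicit_room_id: Optional[str] = None,
        ) -> Tuple[List[JsonDict], int]:
            # No new events; report the stream position unchanged.
            return [], from_key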
diff --git a/synapse/streams/events.py b/synapse/streams/events.py
index 99b0aac2fb..21591d0bfd 100644
--- a/synapse/streams/events.py
+++ b/synapse/streams/events.py
@@ -12,29 +12,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any, Dict
+from typing import TYPE_CHECKING, Iterator, Tuple
+
+import attr
from synapse.handlers.account_data import AccountDataEventSource
from synapse.handlers.presence import PresenceEventSource
from synapse.handlers.receipts import ReceiptEventSource
from synapse.handlers.room import RoomEventSource
from synapse.handlers.typing import TypingNotificationEventSource
+from synapse.streams import EventSource
from synapse.types import StreamToken
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
-class EventSources:
- SOURCE_TYPES = {
- "room": RoomEventSource,
- "presence": PresenceEventSource,
- "typing": TypingNotificationEventSource,
- "receipt": ReceiptEventSource,
- "account_data": AccountDataEventSource,
- }
- def __init__(self, hs):
- self.sources: Dict[str, Any] = {
- name: cls(hs) for name, cls in EventSources.SOURCE_TYPES.items()
- }
+@attr.s(frozen=True, slots=True, auto_attribs=True)
+class _EventSourcesInner:
+ room: RoomEventSource
+ presence: PresenceEventSource
+ typing: TypingNotificationEventSource
+ receipt: ReceiptEventSource
+ account_data: AccountDataEventSource
+
+ def get_sources(self) -> Iterator[Tuple[str, EventSource]]:
+ for attribute in _EventSourcesInner.__attrs_attrs__: # type: ignore[attr-defined]
+ yield attribute.name, getattr(self, attribute.name)
+
+
+class EventSources:
+ def __init__(self, hs: "HomeServer"):
+ self.sources = _EventSourcesInner(
+ *(attribute.type(hs) for attribute in _EventSourcesInner.__attrs_attrs__) # type: ignore[attr-defined]
+ )
self.store = hs.get_datastore()
def get_current_token(self) -> StreamToken:
@@ -44,11 +55,11 @@ class EventSources:
groups_key = self.store.get_group_stream_token()
token = StreamToken(
- room_key=self.sources["room"].get_current_key(),
- presence_key=self.sources["presence"].get_current_key(),
- typing_key=self.sources["typing"].get_current_key(),
- receipt_key=self.sources["receipt"].get_current_key(),
- account_data_key=self.sources["account_data"].get_current_key(),
+ room_key=self.sources.room.get_current_key(),
+ presence_key=self.sources.presence.get_current_key(),
+ typing_key=self.sources.typing.get_current_key(),
+ receipt_key=self.sources.receipt.get_current_key(),
+ account_data_key=self.sources.account_data.get_current_key(),
push_rules_key=push_rules_key,
to_device_key=to_device_key,
device_list_key=device_list_key,
@@ -67,7 +78,7 @@ class EventSources:
The current token for pagination.
"""
token = StreamToken(
- room_key=self.sources["room"].get_current_key(),
+ room_key=self.sources.room.get_current_key(),
presence_key=0,
typing_key=0,
receipt_key=0,
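The attrs-based container leans on two tricks: fields can be iterated in
declaration order, and (because auto_attribs records the annotated classes)
each source can be constructed from its field's type, which is how
EventSources.__init__ builds every source from `hs`. A standalone sketch of
the iteration half, using the public attr.fields rather than
__attrs_attrs__:

    import attr

    @attr.s(frozen=True, slots=True, auto_attribs=True)
    class Sources:
        room: str
        typing: str

        def get_sources(self):
            # attr.fields yields the attrs-defined fields in declaration
            # order, pairing each field name with its value on this instance.
            for attribute in attr.fields(Sources):
                yield attribute.name, getattr(self, attribute.name)

    assert list(Sources("r", "t").get_sources()) == [("room", "r"), ("typing", "t")]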
diff --git a/tests/handlers/test_receipts.py b/tests/handlers/test_receipts.py
index 732a12c9bd..5de89c873b 100644
--- a/tests/handlers/test_receipts.py
+++ b/tests/handlers/test_receipts.py
@@ -23,7 +23,7 @@ from tests import unittest
class ReceiptsTestCase(unittest.HomeserverTestCase):
def prepare(self, reactor, clock, hs):
- self.event_source = hs.get_event_sources().sources["receipt"]
+ self.event_source = hs.get_event_sources().sources.receipt
# In the first param of _test_filters_hidden we use "hidden" instead of
# ReadReceiptEventFields.MSC2285_HIDDEN. We do this because we're mocking
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index fa3cff598e..000f9b9fde 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -89,7 +89,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.handler = hs.get_typing_handler()
- self.event_source = hs.get_event_sources().sources["typing"]
+ self.event_source = hs.get_event_sources().sources.typing
self.datastore = hs.get_datastore()
self.datastore.get_destination_retry_timings = Mock(
@@ -171,7 +171,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.assertEquals(self.event_source.get_current_key(), 1)
events = self.get_success(
- self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0)
+ self.event_source.get_new_events(
+ user=U_APPLE, from_key=0, limit=None, room_ids=[ROOM_ID], is_guest=False
+ )
)
self.assertEquals(
events[0],
@@ -239,7 +241,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.assertEquals(self.event_source.get_current_key(), 1)
events = self.get_success(
- self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0)
+ self.event_source.get_new_events(
+ user=U_APPLE, from_key=0, limit=None, room_ids=[ROOM_ID], is_guest=False
+ )
)
self.assertEquals(
events[0],
@@ -276,7 +280,13 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.assertEquals(self.event_source.get_current_key(), 0)
events = self.get_success(
- self.event_source.get_new_events(room_ids=[OTHER_ROOM_ID], from_key=0)
+ self.event_source.get_new_events(
+ user=U_APPLE,
+ from_key=0,
+ limit=None,
+ room_ids=[OTHER_ROOM_ID],
+ is_guest=False,
+ )
)
self.assertEquals(events[0], [])
self.assertEquals(events[1], 0)
@@ -324,7 +334,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.assertEquals(self.event_source.get_current_key(), 1)
events = self.get_success(
- self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0)
+ self.event_source.get_new_events(
+ user=U_APPLE, from_key=0, limit=None, room_ids=[ROOM_ID], is_guest=False
+ )
)
self.assertEquals(
events[0],
@@ -350,7 +362,13 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.assertEquals(self.event_source.get_current_key(), 1)
events = self.get_success(
- self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0)
+ self.event_source.get_new_events(
+ user=U_APPLE,
+ from_key=0,
+ limit=None,
+ room_ids=[ROOM_ID],
+ is_guest=False,
+ )
)
self.assertEquals(
events[0],
@@ -369,7 +387,13 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.assertEquals(self.event_source.get_current_key(), 2)
events = self.get_success(
- self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=1)
+ self.event_source.get_new_events(
+ user=U_APPLE,
+ from_key=1,
+ limit=None,
+ room_ids=[ROOM_ID],
+ is_guest=False,
+ )
)
self.assertEquals(
events[0],
@@ -392,7 +416,13 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.assertEquals(self.event_source.get_current_key(), 3)
events = self.get_success(
- self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0)
+ self.event_source.get_new_events(
+ user=U_APPLE,
+ from_key=0,
+ limit=None,
+ room_ids=[ROOM_ID],
+ is_guest=False,
+ )
)
self.assertEquals(
events[0],
diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py
index 6a0d9a82be..b0c44af033 100644
--- a/tests/rest/client/test_shadow_banned.py
+++ b/tests/rest/client/test_shadow_banned.py
@@ -193,7 +193,7 @@ class RoomTestCase(_ShadowBannedBase):
self.assertEquals(200, channel.code)
# There should be no typing events.
- event_source = self.hs.get_event_sources().sources["typing"]
+ event_source = self.hs.get_event_sources().sources.typing
self.assertEquals(event_source.get_current_key(), 0)
# The other user can join and send typing events.
@@ -210,7 +210,13 @@ class RoomTestCase(_ShadowBannedBase):
# These appear in the room.
self.assertEquals(event_source.get_current_key(), 1)
events = self.get_success(
- event_source.get_new_events(from_key=0, room_ids=[room_id])
+ event_source.get_new_events(
+ user=UserID.from_string(self.other_user_id),
+ from_key=0,
+ limit=None,
+ room_ids=[room_id],
+ is_guest=False,
+ )
)
self.assertEquals(
events[0],
diff --git a/tests/rest/client/test_typing.py b/tests/rest/client/test_typing.py
index b54b004733..ee0abd5295 100644
--- a/tests/rest/client/test_typing.py
+++ b/tests/rest/client/test_typing.py
@@ -41,7 +41,7 @@ class RoomTypingTestCase(unittest.HomeserverTestCase):
federation_client=Mock(),
)
- self.event_source = hs.get_event_sources().sources["typing"]
+ self.event_source = hs.get_event_sources().sources.typing
hs.get_federation_handler = Mock()
@@ -76,7 +76,13 @@ class RoomTypingTestCase(unittest.HomeserverTestCase):
self.assertEquals(self.event_source.get_current_key(), 1)
events = self.get_success(
- self.event_source.get_new_events(from_key=0, room_ids=[self.room_id])
+ self.event_source.get_new_events(
+ user=UserID.from_string(self.user_id),
+ from_key=0,
+ limit=None,
+ room_ids=[self.room_id],
+ is_guest=False,
+ )
)
self.assertEquals(
events[0],
--
cgit 1.5.1
From 51e2db35983953b13e536331ec2f6ad4cae7e0f1 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 21 Sep 2021 15:06:28 -0500
Subject: Rename MSC2716 things from `chunk` to `batch` to match `/batch_send`
endpoint (#10838)
See https://github.com/matrix-org/matrix-doc/pull/2716#discussion_r684574497
This drops support for older MSC2716 room versions so we don't have to worry
about supporting both chunk and batch events.
---
changelog.d/10838.misc | 1 +
synapse/api/constants.py | 10 +--
synapse/api/room_versions.py | 22 +-----
synapse/event_auth.py | 8 +-
synapse/events/utils.py | 6 +-
synapse/handlers/message.py | 2 +-
synapse/rest/client/room_batch.py | 86 +++++++++++-----------
synapse/storage/databases/main/event_federation.py | 30 ++++----
synapse/storage/databases/main/events.py | 46 ++++++------
synapse/storage/databases/main/room_batch.py | 6 +-
synapse/storage/schema/__init__.py | 2 +-
.../01msc2716_chunk_to_batch_rename.sql.postgres | 23 ++++++
.../64/01msc2716_chunk_to_batch_rename.sql.sqlite | 37 ++++++++++
13 files changed, 162 insertions(+), 117 deletions(-)
create mode 100644 changelog.d/10838.misc
create mode 100644 synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.postgres
create mode 100644 synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.sqlite
diff --git a/changelog.d/10838.misc b/changelog.d/10838.misc
new file mode 100644
index 0000000000..b1977d0a2e
--- /dev/null
+++ b/changelog.d/10838.misc
@@ -0,0 +1 @@
+Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) fields and event types from `chunk` to `batch` to match the `/batch_send` endpoint.
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 236f0c7f99..39fd9954d5 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -121,7 +121,7 @@ class EventTypes:
SpaceParent = "m.space.parent"
MSC2716_INSERTION = "org.matrix.msc2716.insertion"
- MSC2716_CHUNK = "org.matrix.msc2716.chunk"
+ MSC2716_BATCH = "org.matrix.msc2716.batch"
MSC2716_MARKER = "org.matrix.msc2716.marker"
@@ -209,11 +209,11 @@ class EventContentFields:
# Used on normal messages to indicate they were historically imported after the fact
MSC2716_HISTORICAL = "org.matrix.msc2716.historical"
- # For "insertion" events to indicate what the next chunk ID should be in
+ # For "insertion" events to indicate what the next batch ID should be in
# order to connect to it
- MSC2716_NEXT_CHUNK_ID = "org.matrix.msc2716.next_chunk_id"
- # Used on "chunk" events to indicate which insertion event it connects to
- MSC2716_CHUNK_ID = "org.matrix.msc2716.chunk_id"
+ MSC2716_NEXT_BATCH_ID = "org.matrix.msc2716.next_batch_id"
+ # Used on "batch" events to indicate which insertion event it connects to
+ MSC2716_BATCH_ID = "org.matrix.msc2716.batch_id"
# For "marker" events
MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion"
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index 61d9c658a9..0a895bba48 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -244,24 +244,8 @@ class RoomVersions:
msc2716_historical=False,
msc2716_redactions=False,
)
- MSC2716 = RoomVersion(
- "org.matrix.msc2716",
- RoomDisposition.UNSTABLE,
- EventFormatVersions.V3,
- StateResolutionVersions.V2,
- enforce_key_validity=True,
- special_case_aliases_auth=False,
- strict_canonicaljson=True,
- limit_notifications_power_levels=True,
- msc2176_redaction_rules=False,
- msc3083_join_rules=False,
- msc3375_redaction_rules=False,
- msc2403_knocking=True,
- msc2716_historical=True,
- msc2716_redactions=False,
- )
- MSC2716v2 = RoomVersion(
- "org.matrix.msc2716v2",
+ MSC2716v3 = RoomVersion(
+ "org.matrix.msc2716v3",
RoomDisposition.UNSTABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
@@ -289,9 +273,9 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
RoomVersions.V6,
RoomVersions.MSC2176,
RoomVersions.V7,
- RoomVersions.MSC2716,
RoomVersions.V8,
RoomVersions.V9,
+ RoomVersions.MSC2716v3,
)
}
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index cb133f3f84..fc50a0e71a 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -213,7 +213,7 @@ def check(
if (
event.type == EventTypes.MSC2716_INSERTION
- or event.type == EventTypes.MSC2716_CHUNK
+ or event.type == EventTypes.MSC2716_BATCH
or event.type == EventTypes.MSC2716_MARKER
):
check_historical(room_version_obj, event, auth_events)
@@ -552,14 +552,14 @@ def check_historical(
auth_events: StateMap[EventBase],
) -> None:
"""Check whether the event sender is allowed to send historical related
- events like "insertion", "chunk", and "marker".
+ events like "insertion", "batch", and "marker".
Returns:
None
Raises:
AuthError if the event sender is not allowed to send historical related events
- ("insertion", "chunk", and "marker").
+ ("insertion", "batch", and "marker").
"""
# Ignore the auth checks in room versions that do not support historical
# events
@@ -573,7 +573,7 @@ def check_historical(
if user_level < historical_level:
raise AuthError(
403,
- 'You don\'t have permission to send send historical related events ("insertion", "chunk", and "marker")',
+ 'You don\'t have permission to send historical related events ("insertion", "batch", and "marker")',
)
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index fb22337e27..f86113a448 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -141,9 +141,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
elif event_type == EventTypes.Redaction and room_version.msc2176_redaction_rules:
add_fields("redacts")
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_INSERTION:
- add_fields(EventContentFields.MSC2716_NEXT_CHUNK_ID)
- elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_CHUNK:
- add_fields(EventContentFields.MSC2716_CHUNK_ID)
+ add_fields(EventContentFields.MSC2716_NEXT_BATCH_ID)
+ elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_BATCH:
+ add_fields(EventContentFields.MSC2716_BATCH_ID)
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER:
add_fields(EventContentFields.MSC2716_MARKER_INSERTION)
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index bf48536308..6cd694b2da 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -1425,7 +1425,7 @@ class EventCreationHandler:
# structural protocol level).
is_msc2716_event = (
original_event.type == EventTypes.MSC2716_INSERTION
- or original_event.type == EventTypes.MSC2716_CHUNK
+ or original_event.type == EventTypes.MSC2716_BATCH
or original_event.type == EventTypes.MSC2716_MARKER
)
if not room_version_obj.msc2716_historical and is_msc2716_event:
diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py
index f73ccc7f65..bf14ec384e 100644
--- a/synapse/rest/client/room_batch.py
+++ b/synapse/rest/client/room_batch.py
@@ -43,25 +43,25 @@ logger = logging.getLogger(__name__)
class RoomBatchSendEventRestServlet(RestServlet):
"""
- API endpoint which can insert a chunk of events historically back in time
+ API endpoint which can insert a batch of events historically back in time
next to the given `prev_event`.
- `chunk_id` comes from `next_chunk_id `in the response of the batch send
- endpoint and is derived from the "insertion" events added to each chunk.
+ `batch_id` comes from `next_batch_id` in the response of the batch send
+ endpoint and is derived from the "insertion" events added to each batch.
It's not required for the first batch send.
`state_events_at_start` is used to define the historical state events
needed to auth the events like join events. These events will float
outside of the normal DAG as outliers and won't be visible in the chat
- history which also allows us to insert multiple chunks without having a bunch
- of `@mxid joined the room` noise between each chunk.
+ history, which also allows us to insert multiple batches without having a bunch
+ of `@mxid joined the room` noise between each batch.
- `events` is chronological chunk/list of events you want to insert.
- There is a reverse-chronological constraint on chunks so once you insert
+ `events` is a chronological list of events you want to insert.
+ There is a reverse-chronological constraint on batches so once you insert
some messages, you can only insert older ones after that.
- tldr; Insert chunks from your most recent history -> oldest history.
+ tldr; Insert batches from your most recent history -> oldest history.
- POST /_matrix/client/unstable/org.matrix.msc2716/rooms/<roomID>/batch_send?prev_event_id=<eventID>&chunk_id=<chunkID>
+ POST /_matrix/client/unstable/org.matrix.msc2716/rooms/<roomID>/batch_send?prev_event_id=<eventID>&batch_id=<batchID>
{
"events": [ ... ],
"state_events_at_start": [ ... ]
@@ -129,7 +129,7 @@ class RoomBatchSendEventRestServlet(RestServlet):
self, sender: str, room_id: str, origin_server_ts: int
) -> JsonDict:
"""Creates an event dict for an "insertion" event with the proper fields
- and a random chunk ID.
+ and a random batch ID.
Args:
sender: The event author MXID
@@ -140,13 +140,13 @@ class RoomBatchSendEventRestServlet(RestServlet):
The new event dictionary to insert.
"""
- next_chunk_id = random_string(8)
+ next_batch_id = random_string(8)
insertion_event = {
"type": EventTypes.MSC2716_INSERTION,
"sender": sender,
"room_id": room_id,
"content": {
- EventContentFields.MSC2716_NEXT_CHUNK_ID: next_chunk_id,
+ EventContentFields.MSC2716_NEXT_BATCH_ID: next_batch_id,
EventContentFields.MSC2716_HISTORICAL: True,
},
"origin_server_ts": origin_server_ts,
@@ -191,7 +191,7 @@ class RoomBatchSendEventRestServlet(RestServlet):
prev_event_ids_from_query = parse_strings_from_args(
request.args, "prev_event_id"
)
- chunk_id_from_query = parse_string(request, "chunk_id")
+ batch_id_from_query = parse_string(request, "batch_id")
if prev_event_ids_from_query is None:
raise SynapseError(
@@ -291,27 +291,27 @@ class RoomBatchSendEventRestServlet(RestServlet):
prev_event_ids_from_query
)
- # Figure out which chunk to connect to. If they passed in
- # chunk_id_from_query let's use it. The chunk ID passed in comes
- # from the chunk_id in the "insertion" event from the previous chunk.
- last_event_in_chunk = events_to_create[-1]
- chunk_id_to_connect_to = chunk_id_from_query
+ # Figure out which batch to connect to. If they passed in
+ # batch_id_from_query let's use it. The batch ID passed in comes
+ # from the batch_id in the "insertion" event from the previous batch.
+ last_event_in_batch = events_to_create[-1]
+ batch_id_to_connect_to = batch_id_from_query
base_insertion_event = None
- if chunk_id_from_query:
+ if batch_id_from_query:
# All but the first base insertion event should point at a fake
# event, which causes the HS to ask for the state at the start of
- # the chunk later.
+ # the batch later.
prev_event_ids = [fake_prev_event_id]
- # Verify the chunk_id_from_query corresponds to an actual insertion event
- # and have the chunk connected.
+ # Verify the batch_id_from_query corresponds to an actual insertion event
+ # and have the batch connected.
corresponding_insertion_event_id = (
- await self.store.get_insertion_event_by_chunk_id(chunk_id_from_query)
+ await self.store.get_insertion_event_by_batch_id(batch_id_from_query)
)
if corresponding_insertion_event_id is None:
raise SynapseError(
400,
- "No insertion event corresponds to the given ?chunk_id",
+ "No insertion event corresponds to the given ?batch_id",
errcode=Codes.INVALID_PARAM,
)
pass
@@ -328,7 +328,7 @@ class RoomBatchSendEventRestServlet(RestServlet):
base_insertion_event_dict = self._create_insertion_event_dict(
sender=requester.user.to_string(),
room_id=room_id,
- origin_server_ts=last_event_in_chunk["origin_server_ts"],
+ origin_server_ts=last_event_in_batch["origin_server_ts"],
)
base_insertion_event_dict["prev_events"] = prev_event_ids.copy()
@@ -347,38 +347,38 @@ class RoomBatchSendEventRestServlet(RestServlet):
depth=inherited_depth,
)
- chunk_id_to_connect_to = base_insertion_event["content"][
- EventContentFields.MSC2716_NEXT_CHUNK_ID
+ batch_id_to_connect_to = base_insertion_event["content"][
+ EventContentFields.MSC2716_NEXT_BATCH_ID
]
- # Connect this current chunk to the insertion event from the previous chunk
- chunk_event = {
- "type": EventTypes.MSC2716_CHUNK,
+ # Connect this current batch to the insertion event from the previous batch
+ batch_event = {
+ "type": EventTypes.MSC2716_BATCH,
"sender": requester.user.to_string(),
"room_id": room_id,
"content": {
- EventContentFields.MSC2716_CHUNK_ID: chunk_id_to_connect_to,
+ EventContentFields.MSC2716_BATCH_ID: batch_id_to_connect_to,
EventContentFields.MSC2716_HISTORICAL: True,
},
- # Since the chunk event is put at the end of the chunk,
+ # Since the batch event is put at the end of the batch,
# where the newest-in-time event is, copy the origin_server_ts from
# the last event we're inserting
- "origin_server_ts": last_event_in_chunk["origin_server_ts"],
+ "origin_server_ts": last_event_in_batch["origin_server_ts"],
}
- # Add the chunk event to the end of the chunk (newest-in-time)
- events_to_create.append(chunk_event)
+ # Add the batch event to the end of the batch (newest-in-time)
+ events_to_create.append(batch_event)
- # Add an "insertion" event to the start of each chunk (next to the oldest-in-time
- # event in the chunk) so the next chunk can be connected to this one.
+ # Add an "insertion" event to the start of each batch (next to the oldest-in-time
+ # event in the batch) so the next batch can be connected to this one.
insertion_event = self._create_insertion_event_dict(
sender=requester.user.to_string(),
room_id=room_id,
- # Since the insertion event is put at the start of the chunk,
+ # Since the insertion event is put at the start of the batch,
# where the oldest-in-time event is, copy the origin_server_ts from
# the first event we're inserting
origin_server_ts=events_to_create[0]["origin_server_ts"],
)
- # Prepend the insertion event to the start of the chunk (oldest-in-time)
+ # Prepend the insertion event to the start of the batch (oldest-in-time)
events_to_create = [insertion_event] + events_to_create
event_ids = []
@@ -439,17 +439,17 @@ class RoomBatchSendEventRestServlet(RestServlet):
)
insertion_event_id = event_ids[0]
- chunk_event_id = event_ids[-1]
+ batch_event_id = event_ids[-1]
historical_event_ids = event_ids[1:-1]
response_dict = {
"state_event_ids": state_event_ids_at_start,
"event_ids": historical_event_ids,
- "next_chunk_id": insertion_event["content"][
- EventContentFields.MSC2716_NEXT_CHUNK_ID
+ "next_batch_id": insertion_event["content"][
+ EventContentFields.MSC2716_NEXT_BATCH_ID
],
"insertion_event_id": insertion_event_id,
- "chunk_event_id": chunk_event_id,
+ "batch_event_id": batch_event_id,
}
if base_insertion_event is not None:
response_dict["base_insertion_event_id"] = base_insertion_event.event_id
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index 047782eb06..10184d6ae7 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -1034,13 +1034,13 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
LIMIT ?
"""
- # Find any chunk connections of a given insertion event
- chunk_connection_query = """
+ # Find any batch connections of a given insertion event
+ batch_connection_query = """
SELECT e.depth, c.event_id FROM insertion_events AS i
- /* Find the chunk that connects to the given insertion event */
- INNER JOIN chunk_events AS c
- ON i.next_chunk_id = c.chunk_id
- /* Get the depth of the chunk start event from the events table */
+ /* Find the batch that connects to the given insertion event */
+ INNER JOIN batch_events AS c
+ ON i.next_batch_id = c.batch_id
+ /* Get the depth of the batch start event from the events table */
INNER JOIN events AS e USING (event_id)
/* Find an insertion event which matches the given event_id */
WHERE i.event_id = ?
@@ -1077,12 +1077,12 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
event_results.add(event_id)
- # Try and find any potential historical chunks of message history.
+ # Try and find any potential historical batches of message history.
#
# First we look for an insertion event connected to the current
# event (by prev_event). If we find any, we need to go and try to
- # find any chunk events connected to the insertion event (by
- # chunk_id). If we find any, we'll add them to the queue and
+ # find any batch events connected to the insertion event (by
+ # batch_id). If we find any, we'll add them to the queue and
# navigate up the DAG like normal in the next iteration of the loop.
txn.execute(
connected_insertion_event_query, (event_id, limit - len(event_results))
@@ -1097,17 +1097,17 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
connected_insertion_event = row[1]
queue.put((-connected_insertion_event_depth, connected_insertion_event))
- # Find any chunk connections for the given insertion event
+ # Find any batch connections for the given insertion event
txn.execute(
- chunk_connection_query,
+ batch_connection_query,
(connected_insertion_event, limit - len(event_results)),
)
- chunk_start_event_id_results = txn.fetchall()
+ batch_start_event_id_results = txn.fetchall()
logger.debug(
- "_get_backfill_events: chunk_start_event_id_results %s",
- chunk_start_event_id_results,
+ "_get_backfill_events: batch_start_event_id_results %s",
+ batch_start_event_id_results,
)
- for row in chunk_start_event_id_results:
+ for row in batch_start_event_id_results:
if row[1] not in event_results:
queue.put((-row[0], row[1]))
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index dec7e8594e..584f818ff3 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1509,7 +1509,7 @@ class PersistEventsStore:
self._handle_event_relations(txn, event)
self._handle_insertion_event(txn, event)
- self._handle_chunk_event(txn, event)
+ self._handle_batch_event(txn, event)
# Store the labels for this event.
labels = event.content.get(EventContentFields.LABELS)
@@ -1790,23 +1790,23 @@ class PersistEventsStore:
):
return
- next_chunk_id = event.content.get(EventContentFields.MSC2716_NEXT_CHUNK_ID)
- if next_chunk_id is None:
- # Invalid insertion event without next chunk ID
+ next_batch_id = event.content.get(EventContentFields.MSC2716_NEXT_BATCH_ID)
+ if next_batch_id is None:
+ # Invalid insertion event without next batch ID
return
logger.debug(
- "_handle_insertion_event (next_chunk_id=%s) %s", next_chunk_id, event
+ "_handle_insertion_event (next_batch_id=%s) %s", next_batch_id, event
)
- # Keep track of the insertion event and the chunk ID
+ # Keep track of the insertion event and the batch ID
self.db_pool.simple_insert_txn(
txn,
table="insertion_events",
values={
"event_id": event.event_id,
"room_id": event.room_id,
- "next_chunk_id": next_chunk_id,
+ "next_batch_id": next_batch_id,
},
)
@@ -1822,8 +1822,8 @@ class PersistEventsStore:
},
)
- def _handle_chunk_event(self, txn: LoggingTransaction, event: EventBase):
- """Handles inserting the chunk edges/connections between the chunk event
+ def _handle_batch_event(self, txn: LoggingTransaction, event: EventBase):
+ """Handles inserting the batch edges/connections between the batch event
and an insertion event. Part of MSC2716.
Args:
@@ -1831,11 +1831,11 @@ class PersistEventsStore:
event: The event to process
"""
- if event.type != EventTypes.MSC2716_CHUNK:
- # Not a chunk event
+ if event.type != EventTypes.MSC2716_BATCH:
+ # Not a batch event
return
- # Skip processing a chunk event if the room version doesn't
+ # Skip processing a batch event if the room version doesn't
# support it or the event is not from the room creator.
room_version = self.store.get_room_version_txn(txn, event.room_id)
room_creator = self.db_pool.simple_select_one_onecol_txn(
@@ -1852,35 +1852,35 @@ class PersistEventsStore:
):
return
- chunk_id = event.content.get(EventContentFields.MSC2716_CHUNK_ID)
- if chunk_id is None:
- # Invalid chunk event without a chunk ID
+ batch_id = event.content.get(EventContentFields.MSC2716_BATCH_ID)
+ if batch_id is None:
+ # Invalid batch event without a batch ID
return
- logger.debug("_handle_chunk_event chunk_id=%s %s", chunk_id, event)
+ logger.debug("_handle_batch_event batch_id=%s %s", batch_id, event)
- # Keep track of the insertion event and the chunk ID
+ # Keep track of the insertion event and the batch ID
self.db_pool.simple_insert_txn(
txn,
- table="chunk_events",
+ table="batch_events",
values={
"event_id": event.event_id,
"room_id": event.room_id,
- "chunk_id": chunk_id,
+ "batch_id": batch_id,
},
)
- # When we receive an event with a `chunk_id` referencing the
- # `next_chunk_id` of the insertion event, we can remove it from the
+ # When we receive an event with a `batch_id` referencing the
+ # `next_batch_id` of the insertion event, we can remove it from the
# `insertion_event_extremities` table.
sql = """
DELETE FROM insertion_event_extremities WHERE event_id IN (
SELECT event_id FROM insertion_events
- WHERE next_chunk_id = ?
+ WHERE next_batch_id = ?
)
"""
- txn.execute(sql, (chunk_id,))
+ txn.execute(sql, (batch_id,))
def _handle_redaction(self, txn, redacted_event_id):
"""Handles receiving a redaction and checking whether we need to remove
diff --git a/synapse/storage/databases/main/room_batch.py b/synapse/storage/databases/main/room_batch.py
index 54fa361d3e..a383388757 100644
--- a/synapse/storage/databases/main/room_batch.py
+++ b/synapse/storage/databases/main/room_batch.py
@@ -18,11 +18,11 @@ from synapse.storage._base import SQLBaseStore
class RoomBatchStore(SQLBaseStore):
- async def get_insertion_event_by_chunk_id(self, chunk_id: str) -> Optional[str]:
+ async def get_insertion_event_by_batch_id(self, batch_id: str) -> Optional[str]:
"""Retrieve a insertion event ID.
Args:
- chunk_id: The chunk ID of the insertion event to retrieve.
+ batch_id: The batch ID of the insertion event to retrieve.
Returns:
The event_id of an insertion event, or None if there is no known
@@ -30,7 +30,7 @@ class RoomBatchStore(SQLBaseStore):
"""
return await self.db_pool.simple_select_one_onecol(
table="insertion_events",
- keyvalues={"next_chunk_id": chunk_id},
+ keyvalues={"next_batch_id": batch_id},
retcol="event_id",
allow_none=True,
)
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index af9cc69949..aa2ce44c6c 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -14,7 +14,7 @@
# When updating these values, please leave a short summary of the changes below.
-SCHEMA_VERSION = 63
+SCHEMA_VERSION = 64
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
diff --git a/synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.postgres b/synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.postgres
new file mode 100644
index 0000000000..5f38993208
--- /dev/null
+++ b/synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.postgres
@@ -0,0 +1,23 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE insertion_events RENAME COLUMN next_chunk_id TO next_batch_id;
+DROP INDEX insertion_events_next_chunk_id;
+CREATE INDEX IF NOT EXISTS insertion_events_next_batch_id ON insertion_events(next_batch_id);
+
+ALTER TABLE chunk_events RENAME TO batch_events;
+ALTER TABLE batch_events RENAME COLUMN chunk_id TO batch_id;
+DROP INDEX chunk_events_chunk_id;
+CREATE INDEX IF NOT EXISTS batch_events_batch_id ON batch_events(batch_id);
diff --git a/synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.sqlite b/synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.sqlite
new file mode 100644
index 0000000000..4989563995
--- /dev/null
+++ b/synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.sqlite
@@ -0,0 +1,37 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Re-create the insertion_events table since SQLite doesn't support renaming
+-- columns directly (next_chunk_id -> next_batch_id)
+DROP TABLE insertion_events;
+CREATE TABLE IF NOT EXISTS insertion_events(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ next_batch_id TEXT NOT NULL
+);
+CREATE UNIQUE INDEX IF NOT EXISTS insertion_events_event_id ON insertion_events(event_id);
+CREATE INDEX IF NOT EXISTS insertion_events_next_batch_id ON insertion_events(next_batch_id);
+
+-- Re-create the chunk_events table since SQLite doesn't support renaming
+-- columns directly (chunk_id -> batch_id)
+DROP TABLE chunk_events;
+CREATE TABLE IF NOT EXISTS batch_events(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ batch_id TEXT NOT NULL
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS batch_events_event_id ON batch_events(event_id);
+CREATE INDEX IF NOT EXISTS batch_events_batch_id ON batch_events(batch_id);
--
cgit 1.5.1
From a2d7195e0111ee6b2fedaabb0f02cfae648cd347 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Wed, 22 Sep 2021 10:59:52 +0100
Subject: Track why we're evicting from caches (#10829)
So we can distinguish between "evicting because the cache is too big" and "evicting because the cache entries haven't been recently used".
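For a sense of the new accounting, a minimal runnable sketch that mirrors the `EvictionReason` enum and the new `inc_evictions` signature below (`CacheMetricSketch` is a stand-in, not Synapse's `CacheMetric`):

    import collections
    import typing
    from enum import Enum, auto

    class EvictionReason(Enum):
        size = auto()  # cache grew past its configured max size
        time = auto()  # entry expired / wasn't accessed recently

    class CacheMetricSketch:
        def __init__(self) -> None:
            # One counter per reason, instead of a single evicted_size total.
            self.eviction_size_by_reason: typing.Counter[EvictionReason] = (
                collections.Counter()
            )

        def inc_evictions(self, reason: EvictionReason, size: int = 1) -> None:
            self.eviction_size_by_reason[reason] += size

    metrics = CacheMetricSketch()
    metrics.inc_evictions(EvictionReason.size, 3)  # LruCache over max_size
    metrics.inc_evictions(EvictionReason.time)     # ExpiringCache pruned an entry
    # Each reason becomes a separate `reason` label on the Prometheus gauges.
    print(dict(metrics.eviction_size_by_reason))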
---
changelog.d/10829.misc | 1 +
synapse/util/caches/__init__.py | 31 +++++++++++++++++++++++--------
synapse/util/caches/expiringcache.py | 10 +++++-----
synapse/util/caches/lrucache.py | 4 ++--
4 files changed, 31 insertions(+), 15 deletions(-)
create mode 100644 changelog.d/10829.misc
diff --git a/changelog.d/10829.misc b/changelog.d/10829.misc
new file mode 100644
index 0000000000..ac5fd6b047
--- /dev/null
+++ b/changelog.d/10829.misc
@@ -0,0 +1 @@
+Track cache eviction rates more finely in Prometheus' monitoring.
\ No newline at end of file
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py
index cab1bf0c15..df4d61e4b6 100644
--- a/synapse/util/caches/__init__.py
+++ b/synapse/util/caches/__init__.py
@@ -12,8 +12,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import collections
import logging
+import typing
+from enum import Enum, auto
from sys import intern
from typing import Callable, Dict, Optional, Sized
@@ -34,7 +36,7 @@ collectors_by_name: Dict[str, "CacheMetric"] = {}
cache_size = Gauge("synapse_util_caches_cache:size", "", ["name"])
cache_hits = Gauge("synapse_util_caches_cache:hits", "", ["name"])
-cache_evicted = Gauge("synapse_util_caches_cache:evicted_size", "", ["name"])
+cache_evicted = Gauge("synapse_util_caches_cache:evicted_size", "", ["name", "reason"])
cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"])
cache_max_size = Gauge("synapse_util_caches_cache_max_size", "", ["name"])
cache_memory_usage = Gauge(
@@ -46,11 +48,16 @@ cache_memory_usage = Gauge(
response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"])
response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"])
response_cache_evicted = Gauge(
- "synapse_util_caches_response_cache:evicted_size", "", ["name"]
+ "synapse_util_caches_response_cache:evicted_size", "", ["name", "reason"]
)
response_cache_total = Gauge("synapse_util_caches_response_cache:total", "", ["name"])
+class EvictionReason(Enum):
+ size = auto()
+ time = auto()
+
+
@attr.s(slots=True)
class CacheMetric:
@@ -61,7 +68,9 @@ class CacheMetric:
hits = attr.ib(default=0)
misses = attr.ib(default=0)
- evicted_size = attr.ib(default=0)
+ eviction_size_by_reason: typing.Counter[EvictionReason] = attr.ib(
+ factory=collections.Counter
+ )
memory_usage = attr.ib(default=None)
def inc_hits(self) -> None:
@@ -70,8 +79,8 @@ class CacheMetric:
def inc_misses(self) -> None:
self.misses += 1
- def inc_evictions(self, size: int = 1) -> None:
- self.evicted_size += size
+ def inc_evictions(self, reason: EvictionReason, size: int = 1) -> None:
+ self.eviction_size_by_reason[reason] += size
def inc_memory_usage(self, memory: int) -> None:
if self.memory_usage is None:
@@ -94,14 +103,20 @@ class CacheMetric:
if self._cache_type == "response_cache":
response_cache_size.labels(self._cache_name).set(len(self._cache))
response_cache_hits.labels(self._cache_name).set(self.hits)
- response_cache_evicted.labels(self._cache_name).set(self.evicted_size)
+ for reason in EvictionReason:
+ response_cache_evicted.labels(self._cache_name, reason.name).set(
+ self.eviction_size_by_reason[reason]
+ )
response_cache_total.labels(self._cache_name).set(
self.hits + self.misses
)
else:
cache_size.labels(self._cache_name).set(len(self._cache))
cache_hits.labels(self._cache_name).set(self.hits)
- cache_evicted.labels(self._cache_name).set(self.evicted_size)
+ for reason in EvictionReason:
+ cache_evicted.labels(self._cache_name, reason.name).set(
+ self.eviction_size_by_reason[reason]
+ )
cache_total.labels(self._cache_name).set(self.hits + self.misses)
if getattr(self._cache, "max_size", None):
cache_max_size.labels(self._cache_name).set(self._cache.max_size)
diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py
index bde16b8577..c3f72aa06d 100644
--- a/synapse/util/caches/expiringcache.py
+++ b/synapse/util/caches/expiringcache.py
@@ -22,7 +22,7 @@ from typing_extensions import Literal
from synapse.config import cache as cache_config
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util import Clock
-from synapse.util.caches import register_cache
+from synapse.util.caches import EvictionReason, register_cache
logger = logging.getLogger(__name__)
@@ -98,9 +98,9 @@ class ExpiringCache(Generic[KT, VT]):
while self._max_size and len(self) > self._max_size:
_key, value = self._cache.popitem(last=False)
if self.iterable:
- self.metrics.inc_evictions(len(value.value))
+ self.metrics.inc_evictions(EvictionReason.size, len(value.value))
else:
- self.metrics.inc_evictions()
+ self.metrics.inc_evictions(EvictionReason.size)
def __getitem__(self, key: KT) -> VT:
try:
@@ -175,9 +175,9 @@ class ExpiringCache(Generic[KT, VT]):
for k in keys_to_delete:
value = self._cache.pop(k)
if self.iterable:
- self.metrics.inc_evictions(len(value.value))
+ self.metrics.inc_evictions(EvictionReason.time, len(value.value))
else:
- self.metrics.inc_evictions()
+ self.metrics.inc_evictions(EvictionReason.time)
logger.debug(
"[%s] _prune_cache before: %d, after len: %d",
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index 39dce9dd41..ea6e8dc8d1 100644
--- a/synapse/util/caches/lrucache.py
+++ b/synapse/util/caches/lrucache.py
@@ -40,7 +40,7 @@ from twisted.internet.interfaces import IReactorTime
from synapse.config import cache as cache_config
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.util import Clock, caches
-from synapse.util.caches import CacheMetric, register_cache
+from synapse.util.caches import CacheMetric, EvictionReason, register_cache
from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
from synapse.util.linked_list import ListNode
@@ -403,7 +403,7 @@ class LruCache(Generic[KT, VT]):
evicted_len = delete_node(node)
cache.pop(node.key, None)
if metrics:
- metrics.inc_evictions(evicted_len)
+ metrics.inc_evictions(EvictionReason.size, evicted_len)
def synchronized(f: FT) -> FT:
@wraps(f)
--
cgit 1.5.1
From 4ecf51812ebf4cbacd3c6042aa29cb37b7855da3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 22 Sep 2021 12:30:59 +0100
Subject: Include outlier status in `str(event)` for V2/V3 events (#10879)
I meant to do this before, in #10591, but because I'm stupid I forgot to do it
for V2 and V3 events.
I've factored the common code out to `EventBase` to save us having two copies
of it.
This means that for `FrozenEvent` we replace `self.get("event_id", None)` with
`self.event_id`, which I think is safe. `get()` is an alias for
`self._dict.get()`, whereas `event_id()` is an `@property` method which looks
up `self._event_id`, which is populated during construction from the same
dict. We don't seem to rely on the fallback, because if the `event_id` key is
absent from the dict then construction of the `EventBase` object will
fail.
Long story short, the only way this could change behaviour is if
`event_dict["event_id"]` is changed *after* the `EventBase` object is
constructed without updating the `_event_id` field, or vice versa - either of
which would be very problematic anyway and the behavior of `str(event)` is the
least of our worries.
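The shared format now lives on `EventBase`; a self-contained sketch of what it produces (`_EventSketch` is illustrative only):

    class _EventSketch:
        # Illustrative stand-in carrying just the fields __repr__ reads.
        def __init__(self, event_id, type_, state_key, outlier):
            self.event_id = event_id
            self._type = type_
            self._state_key = state_key
            self._outlier = outlier

        def __repr__(self):
            # Same format string as the new EventBase.__repr__.
            return "<%s event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
                self.__class__.__name__,
                self.event_id,
                self._type,
                self._state_key,
                self._outlier,
            )

    print(_EventSketch("$abc123", "m.room.member", "@user:example.com", True))
    # <_EventSketch event_id='$abc123', type='m.room.member',
    #  state_key='@user:example.com', outlier=True>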
---
changelog.d/10879.misc | 1 +
synapse/events/__init__.py | 34 ++++++++++++----------------------
2 files changed, 13 insertions(+), 22 deletions(-)
create mode 100644 changelog.d/10879.misc
diff --git a/changelog.d/10879.misc b/changelog.d/10879.misc
new file mode 100644
index 0000000000..acc04930fa
--- /dev/null
+++ b/changelog.d/10879.misc
@@ -0,0 +1 @@
+Include outlier status when we log V2 or V3 events.
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index a730c1719a..49190459c8 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -344,6 +344,18 @@ class EventBase(metaclass=abc.ABCMeta):
# this will be a no-op if the event dict is already frozen.
self._dict = freeze(self._dict)
+ def __str__(self):
+ return self.__repr__()
+
+ def __repr__(self):
+ return "<%s event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
+ self.__class__.__name__,
+ self.event_id,
+ self.get("type", None),
+ self.get("state_key", None),
+ self.internal_metadata.is_outlier(),
+ )
+
class FrozenEvent(EventBase):
format_version = EventFormatVersions.V1 # All events of this type are V1
@@ -392,17 +404,6 @@ class FrozenEvent(EventBase):
def event_id(self) -> str:
return self._event_id
- def __str__(self):
- return self.__repr__()
-
- def __repr__(self):
- return "" % (
- self.get("event_id", None),
- self.get("type", None),
- self.get("state_key", None),
- self.internal_metadata.is_outlier(),
- )
-
class FrozenEventV2(EventBase):
format_version = EventFormatVersions.V2 # All events of this type are V2
@@ -478,17 +479,6 @@ class FrozenEventV2(EventBase):
"""
return self.auth_events
- def __str__(self):
- return self.__repr__()
-
- def __repr__(self):
- return "<%s event_id=%r, type=%r, state_key=%r>" % (
- self.__class__.__name__,
- self.event_id,
- self.get("type", None),
- self.get("state_key", None),
- )
-
class FrozenEventV3(FrozenEventV2):
"""FrozenEventV3, which differs from FrozenEventV2 only in the event_id format"""
--
cgit 1.5.1
From 80828eda06f8e3d6a930c9fa45204ad6fef1d411 Mon Sep 17 00:00:00 2001
From: David Teller
Date: Wed, 22 Sep 2021 15:09:43 +0200
Subject: Extend ModuleApi with the methods we'll need to reject spam based on
…IP - resolves #10832 (#10833)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Extend ModuleApi with the methods we'll need to reject spam based on IP - resolves #10832
Signed-off-by: David Teller
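As a sketch of how a module might use the two new methods, here is a hypothetical spam checker built on the existing `user_may_invite` callback; the blocklist and class are made up for illustration:

    BLOCKED_IP_PREFIXES = ("10.66.",)  # made-up example data

    class IpBasedSpamChecker:
        def __init__(self, config, api):
            self._api = api  # a synapse.module_api.ModuleApi instance

        async def user_may_invite(
            self, inviter: str, invitee: str, room_id: str
        ) -> bool:
            # Only local users have IP data recorded on this homeserver.
            if not self._api.is_mine(inviter):
                return True
            # since_ts=0 fetches everything we have for this user.
            for entry in await self._api.get_user_ip_and_agents(inviter):
                if entry.ip.startswith(BLOCKED_IP_PREFIXES):
                    return False  # reject invites sent from a blocked range
            return True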
---
changelog.d/10833.misc | 1 +
synapse/module_api/__init__.py | 82 +++++++++++++++++++++++++++-
synapse/storage/databases/main/client_ips.py | 27 ++++++---
tests/module_api/test_api.py | 72 ++++++++++++++++++++++++
4 files changed, 174 insertions(+), 8 deletions(-)
create mode 100644 changelog.d/10833.misc
diff --git a/changelog.d/10833.misc b/changelog.d/10833.misc
new file mode 100644
index 0000000000..f23c0a1a02
--- /dev/null
+++ b/changelog.d/10833.misc
@@ -0,0 +1 @@
+Extend the ModuleApi to let plug-ins check whether an ID is local and to access IP + User Agent data.
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 3196c2bec6..174e6934a8 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -24,8 +24,10 @@ from typing import (
List,
Optional,
Tuple,
+ Union,
)
+import attr
import jinja2
from twisted.internet import defer
@@ -46,7 +48,14 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.roommember import ProfileInfo
from synapse.storage.state import StateFilter
-from synapse.types import JsonDict, Requester, UserID, UserInfo, create_requester
+from synapse.types import (
+ DomainSpecificString,
+ JsonDict,
+ Requester,
+ UserID,
+ UserInfo,
+ create_requester,
+)
from synapse.util import Clock
from synapse.util.caches.descriptors import cached
@@ -79,6 +88,18 @@ __all__ = [
logger = logging.getLogger(__name__)
+@attr.s(auto_attribs=True)
+class UserIpAndAgent:
+ """
+ An IP address and user agent used by a user to connect to this homeserver.
+ """
+
+ ip: str
+ user_agent: str
+ # The time at which this user agent/ip was last seen.
+ last_seen: int
+
+
class ModuleApi:
"""A proxy object that gets passed to various plugin modules so they
can register new users etc if necessary.
@@ -700,6 +721,65 @@ class ModuleApi:
(td for td in (self.custom_template_dir, custom_template_directory) if td),
)
+ def is_mine(self, id: Union[str, DomainSpecificString]) -> bool:
+ """
+ Checks whether an ID (user id, room, ...) comes from this homeserver.
+
+ Args:
+ id: any Matrix id (e.g. user id, room id, ...), either as a raw id,
+ e.g. string "@user:example.com" or as a parsed UserID, RoomID, ...
+ Returns:
+ True if id comes from this homeserver, False otherwise.
+
+ Added in Synapse v1.44.0.
+ """
+ if isinstance(id, DomainSpecificString):
+ return self._hs.is_mine(id)
+ else:
+ return self._hs.is_mine_id(id)
+
+ async def get_user_ip_and_agents(
+ self, user_id: str, since_ts: int = 0
+ ) -> List[UserIpAndAgent]:
+ """
+ Return the list of user IPs and agents for a user.
+
+ Args:
+ user_id: the id of a user, local or remote
+ since_ts: a timestamp in milliseconds since the epoch,
+ or the epoch itself if not specified.
+ Returns:
+ The list of all UserIpAndAgent that the user has
+ used to connect to this homeserver since `since_ts`.
+ If the user is remote, this list is empty.
+
+ Added in Synapse v1.44.0.
+ """
+ # Don't hit the db if this is not a local user.
+ is_mine = False
+ try:
+ # Let's be defensive against ill-formed strings.
+ if self.is_mine(user_id):
+ is_mine = True
+ except Exception:
+ pass
+
+ if is_mine:
+ raw_data = await self._store.get_user_ip_and_agents(
+ UserID.from_string(user_id), since_ts
+ )
+ # Sanitize some of the data. We don't want to return tokens.
+ return [
+ UserIpAndAgent(
+ ip=str(data["ip"]),
+ user_agent=str(data["user_agent"]),
+ last_seen=int(data["last_seen"]),
+ )
+ for data in raw_data
+ ]
+ else:
+ return []
+
class PublicRoomListManager:
"""Contains methods for adding to, removing from and querying whether a room
diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index 7a98275d92..7e33ae578c 100644
--- a/synapse/storage/databases/main/client_ips.py
+++ b/synapse/storage/databases/main/client_ips.py
@@ -555,8 +555,11 @@ class ClientIpStore(ClientIpWorkerStore):
return ret
async def get_user_ip_and_agents(
- self, user: UserID
+ self, user: UserID, since_ts: int = 0
) -> List[Dict[str, Union[str, int]]]:
+ """
+ Fetch the IP/User Agent connections seen since a given timestamp.
+ """
user_id = user.to_string()
results = {}
@@ -568,13 +571,23 @@ class ClientIpStore(ClientIpWorkerStore):
) = key
if uid == user_id:
user_agent, _, last_seen = self._batch_row_update[key]
- results[(access_token, ip)] = (user_agent, last_seen)
+ if last_seen >= since_ts:
+ results[(access_token, ip)] = (user_agent, last_seen)
- rows = await self.db_pool.simple_select_list(
- table="user_ips",
- keyvalues={"user_id": user_id},
- retcols=["access_token", "ip", "user_agent", "last_seen"],
- desc="get_user_ip_and_agents",
+ def get_recent(txn):
+ txn.execute(
+ """
+ SELECT access_token, ip, user_agent, last_seen FROM user_ips
+ WHERE last_seen >= ? AND user_id = ?
+ ORDER BY last_seen
+ DESC
+ """,
+ (since_ts, user_id),
+ )
+ return txn.fetchall()
+
+ rows = await self.db_pool.runInteraction(
+ desc="get_user_ip_and_agents", func=get_recent
)
results.update(
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index 7dd519cd44..9d38974fba 100644
--- a/tests/module_api/test_api.py
+++ b/tests/module_api/test_api.py
@@ -43,6 +43,7 @@ class ModuleApiTestCase(HomeserverTestCase):
self.module_api = homeserver.get_module_api()
self.event_creation_handler = homeserver.get_event_creation_handler()
self.sync_handler = homeserver.get_sync_handler()
+ self.auth_handler = homeserver.get_auth_handler()
def make_homeserver(self, reactor, clock):
return self.setup_test_homeserver(
@@ -89,6 +90,77 @@ class ModuleApiTestCase(HomeserverTestCase):
found_user = self.get_success(self.module_api.get_userinfo_by_id("@alice:test"))
self.assertIsNone(found_user)
+ def test_get_user_ip_and_agents(self):
+ user_id = self.register_user("test_get_user_ip_and_agents_user", "1234")
+
+ # Initially, we should have no ip/agent for our user.
+ info = self.get_success(self.module_api.get_user_ip_and_agents(user_id))
+ self.assertEqual(info, [])
+
+ # Insert a first ip, agent. We should be able to retrieve it.
+ self.get_success(
+ self.store.insert_client_ip(
+ user_id, "access_token", "ip_1", "user_agent_1", "device_1", None
+ )
+ )
+ info = self.get_success(self.module_api.get_user_ip_and_agents(user_id))
+
+ self.assertEqual(len(info), 1)
+ last_seen_1 = info[0].last_seen
+
+ # Insert a second ip, agent at a later date. We should be able to retrieve it.
+ last_seen_2 = last_seen_1 + 10000
+ print("%s => %s" % (last_seen_1, last_seen_2))
+ self.get_success(
+ self.store.insert_client_ip(
+ user_id, "access_token", "ip_2", "user_agent_2", "device_2", last_seen_2
+ )
+ )
+ info = self.get_success(self.module_api.get_user_ip_and_agents(user_id))
+
+ self.assertEqual(len(info), 2)
+ ip_1_seen = False
+ ip_2_seen = False
+
+ for i in info:
+ if i.ip == "ip_1":
+ ip_1_seen = True
+ self.assertEqual(i.user_agent, "user_agent_1")
+ self.assertEqual(i.last_seen, last_seen_1)
+ elif i.ip == "ip_2":
+ ip_2_seen = True
+ self.assertEqual(i.user_agent, "user_agent_2")
+ self.assertEqual(i.last_seen, last_seen_2)
+ self.assertTrue(ip_1_seen)
+ self.assertTrue(ip_2_seen)
+
+ # If we fetch from a midpoint between last_seen_1 and last_seen_2,
+ # we should only find the second ip, agent.
+ info = self.get_success(
+ self.module_api.get_user_ip_and_agents(
+ user_id, (last_seen_1 + last_seen_2) / 2
+ )
+ )
+ self.assertEqual(len(info), 1)
+ self.assertEqual(info[0].ip, "ip_2")
+ self.assertEqual(info[0].user_agent, "user_agent_2")
+ self.assertEqual(info[0].last_seen, last_seen_2)
+
+ # If we fetch from a point later than last_seen_2, we shouldn't
+ # find anything.
+ info = self.get_success(
+ self.module_api.get_user_ip_and_agents(user_id, last_seen_2 + 10000)
+ )
+ self.assertEqual(info, [])
+
+ def test_get_user_ip_and_agents__no_user_found(self):
+ info = self.get_success(
+ self.module_api.get_user_ip_and_agents(
+ "@test_get_user_ip_and_agents_user_nonexistent:example.com"
+ )
+ )
+ self.assertEqual(info, [])
+
def test_sending_events_into_room(self):
"""Tests that a module can send events into a room"""
# Mock out create_and_send_nonmember_event to check whether events are being sent
--
cgit 1.5.1
From 724aef9a878cebc137c81f3b261bafb9302fb592 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Wed, 22 Sep 2021 14:21:58 +0100
Subject: Opt out of cache expiry for `get_users_who_share_room_with_user`
(#10826)
* Allow LruCaches to opt out of time-based expiry
* Don't expire `get_users_who_share_room` & friends
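Opting out is a one-argument change at the `@cached` decorator; a sketch with a hypothetical store method:

    from synapse.util.caches.descriptors import cached

    class ExampleStore:
        @cached(max_entries=500000, iterable=True, prune_unread_entries=False)
        async def get_example_mapping(self, user_id: str) -> frozenset:
            # Entries here are never evicted merely for being old; they only
            # fall out when max_entries forces a size-based eviction.
            return frozenset()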
---
changelog.d/10826.misc | 2 ++
synapse/storage/databases/main/roommember.py | 11 ++++++++---
synapse/util/caches/deferred_cache.py | 2 ++
synapse/util/caches/descriptors.py | 5 +++++
synapse/util/caches/lrucache.py | 16 +++++++++++++---
5 files changed, 30 insertions(+), 6 deletions(-)
create mode 100644 changelog.d/10826.misc
diff --git a/changelog.d/10826.misc b/changelog.d/10826.misc
new file mode 100644
index 0000000000..53e56fc362
--- /dev/null
+++ b/changelog.d/10826.misc
@@ -0,0 +1,2 @@
+Opt out of cache expiry for `get_users_who_share_room_with_user`, to hopefully improve `/sync` performance when you
+haven't synced recently.
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 9beeb96aa9..a4ec6bc328 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -162,7 +162,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
self._check_safe_current_state_events_membership_updated_txn,
)
- @cached(max_entries=100000, iterable=True)
+ @cached(max_entries=100000, iterable=True, prune_unread_entries=False)
async def get_users_in_room(self, room_id: str) -> List[str]:
return await self.db_pool.runInteraction(
"get_users_in_room", self.get_users_in_room_txn, room_id
@@ -439,7 +439,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
return results_dict.get("membership"), results_dict.get("event_id")
- @cached(max_entries=500000, iterable=True)
+ @cached(max_entries=500000, iterable=True, prune_unread_entries=False)
async def get_rooms_for_user_with_stream_ordering(
self, user_id: str
) -> FrozenSet[GetRoomsForUserWithStreamOrdering]:
@@ -544,7 +544,12 @@ class RoomMemberWorkerStore(EventsWorkerStore):
)
return frozenset(r.room_id for r in rooms)
- @cached(max_entries=500000, cache_context=True, iterable=True)
+ @cached(
+ max_entries=500000,
+ cache_context=True,
+ iterable=True,
+ prune_unread_entries=False,
+ )
async def get_users_who_share_room_with_user(
self, user_id: str, cache_context: _CacheContext
) -> Set[str]:
diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py
index f05590da0d..6262efe072 100644
--- a/synapse/util/caches/deferred_cache.py
+++ b/synapse/util/caches/deferred_cache.py
@@ -73,6 +73,7 @@ class DeferredCache(Generic[KT, VT]):
tree: bool = False,
iterable: bool = False,
apply_cache_factor_from_config: bool = True,
+ prune_unread_entries: bool = True,
):
"""
Args:
@@ -105,6 +106,7 @@ class DeferredCache(Generic[KT, VT]):
size_callback=(lambda d: len(d) or 1) if iterable else None,
metrics_collection_callback=metrics_cb,
apply_cache_factor_from_config=apply_cache_factor_from_config,
+ prune_unread_entries=prune_unread_entries,
)
self.thread: Optional[threading.Thread] = None
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 1ca31e41ac..b9dcca17f1 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -258,6 +258,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
tree=False,
cache_context=False,
iterable=False,
+ prune_unread_entries: bool = True,
):
super().__init__(orig, num_args=num_args, cache_context=cache_context)
@@ -269,6 +270,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
self.max_entries = max_entries
self.tree = tree
self.iterable = iterable
+ self.prune_unread_entries = prune_unread_entries
def __get__(self, obj, owner):
cache: DeferredCache[CacheKey, Any] = DeferredCache(
@@ -276,6 +278,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
max_entries=self.max_entries,
tree=self.tree,
iterable=self.iterable,
+ prune_unread_entries=self.prune_unread_entries,
)
get_cache_key = self.cache_key_builder
@@ -507,6 +510,7 @@ def cached(
tree: bool = False,
cache_context: bool = False,
iterable: bool = False,
+ prune_unread_entries: bool = True,
) -> Callable[[F], _CachedFunction[F]]:
func = lambda orig: DeferredCacheDescriptor(
orig,
@@ -515,6 +519,7 @@ def cached(
tree=tree,
cache_context=cache_context,
iterable=iterable,
+ prune_unread_entries=prune_unread_entries,
)
return cast(Callable[[F], _CachedFunction[F]], func)
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index ea6e8dc8d1..4ff62b403f 100644
--- a/synapse/util/caches/lrucache.py
+++ b/synapse/util/caches/lrucache.py
@@ -202,10 +202,11 @@ class _Node:
cache: "weakref.ReferenceType[LruCache]",
clock: Clock,
callbacks: Collection[Callable[[], None]] = (),
+ prune_unread_entries: bool = True,
):
self._list_node = ListNode.insert_after(self, root)
- self._global_list_node = None
- if USE_GLOBAL_LIST:
+ self._global_list_node: Optional[_TimedListNode] = None
+ if USE_GLOBAL_LIST and prune_unread_entries:
self._global_list_node = _TimedListNode.insert_after(self, GLOBAL_ROOT)
self._global_list_node.update_last_access(clock)
@@ -314,6 +315,7 @@ class LruCache(Generic[KT, VT]):
metrics_collection_callback: Optional[Callable[[], None]] = None,
apply_cache_factor_from_config: bool = True,
clock: Optional[Clock] = None,
+ prune_unread_entries: bool = True,
):
"""
Args:
@@ -427,7 +429,15 @@ class LruCache(Generic[KT, VT]):
self.len = synchronized(cache_len)
def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()):
- node = _Node(list_root, key, value, weak_ref_to_self, real_clock, callbacks)
+ node = _Node(
+ list_root,
+ key,
+ value,
+ weak_ref_to_self,
+ real_clock,
+ callbacks,
+ prune_unread_entries,
+ )
cache[key] = node
if size_callback:
--
cgit 1.5.1
From 52913d56a5a2b07106774d97f4e188148d85a900 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 22 Sep 2021 09:41:42 -0400
Subject: Add documentation for experimental feature flags. (#10865)
---
changelog.d/10865.doc | 1 +
docs/SUMMARY.md | 1 +
docs/development/experimental_features.md | 37 +++++++++++++++++++++++++++++++
3 files changed, 39 insertions(+)
create mode 100644 changelog.d/10865.doc
create mode 100644 docs/development/experimental_features.md
diff --git a/changelog.d/10865.doc b/changelog.d/10865.doc
new file mode 100644
index 0000000000..deeb0eedf3
--- /dev/null
+++ b/changelog.d/10865.doc
@@ -0,0 +1 @@
+Add developer documentation about experimental configuration flags.
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index fd0045e1ef..bdb44543b8 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -74,6 +74,7 @@
- [Testing]()
- [OpenTracing](opentracing.md)
- [Database Schemas](development/database_schema.md)
+ - [Experimental features](development/experimental_features.md)
- [Synapse Architecture]()
- [Log Contexts](log_contexts.md)
- [Replication](replication.md)
diff --git a/docs/development/experimental_features.md b/docs/development/experimental_features.md
new file mode 100644
index 0000000000..d6b11496cc
--- /dev/null
+++ b/docs/development/experimental_features.md
@@ -0,0 +1,37 @@
+# Implementing experimental features in Synapse
+
+It can be desirable to implement "experimental" features which are disabled by
+default and must be explicitly enabled via the Synapse configuration. This is
+applicable for features which:
+
+* Are unstable in the Matrix spec (e.g. those defined by an MSC that has not yet been merged).
+* Developers are not confident in their use by general Synapse administrators/users
+ (e.g. a feature is incomplete, buggy, performs poorly, or needs further testing).
+
+Note that this only really applies to features which are expected to be desirable
+to a broad audience. The [module infrastructure](../modules/index.md) should
+instead be investigated for non-standard features.
+
+Guarding experimental features behind configuration flags should help with some
+of the following scenarios:
+
+* Ensure that clients do not assume that unstable features exist (failing
+ gracefully if they do not).
+* Unstable features do not become de-facto standards and can be removed
+ aggressively (since only those who have opted-in will be affected).
+* Ease finding the implementation of unstable features in Synapse (for future
+ removal or stabilization).
+* Ease testing a feature (or removal of feature) due to enabling/disabling without
+ code changes. It also becomes possible to ask for wider testing, if desired.
+
+Experimental configuration flags should be disabled by default (requiring Synapse
+administrators to explicitly opt-in), although there are situations where it makes
+sense (from a product point-of-view) to enable features by default. This is
+expected and not an issue.
+
+It is not a requirement for experimental features to be behind a configuration flag,
+but one should be used if unsure.
+
+New experimental configuration flags should be added under the `experimental`
+configuration key (see the `synapse.config.experimental` file) and either explain
+(briefly) what is being enabled, or include the MSC number.
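A minimal sketch of such a flag; `msc9999_enabled` is a made-up name, and the class mirrors the pattern used in `synapse.config.experimental`:

    from synapse.config._base import Config

    class ExperimentalConfig(Config):
        section = "experimental"

        def read_config(self, config, **kwargs):
            experimental = config.get("experimental_features") or {}
            # MSC9999 (made up): hypothetical feature, disabled by default.
            self.msc9999_enabled: bool = experimental.get(
                "msc9999_enabled", False
            )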
--
cgit 1.5.1
From 9391de3f373454aeec5b5c2f01b3c576528e76fe Mon Sep 17 00:00:00 2001
From: Sean Quah <8349537+squahtx@users.noreply.github.com>
Date: Wed, 22 Sep 2021 14:43:26 +0100
Subject: Fix /initialSync error due to unhashable `RoomStreamToken` (#10827)
The deprecated /initialSync endpoint maintains a cache of responses,
using parameter values as part of the cache key. When a `from` or `to`
parameter is specified, it gets converted into a `StreamToken`, which
contains a `RoomStreamToken` and forms part of the cache key.
`RoomStreamToken`s need to be made hashable for this to work.
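Why `frozendict` rather than a plain `dict`: a frozen attrs class derives its `__hash__` from its attributes, so each attribute must itself be hashable. A minimal sketch (`TokenSketch` stands in for the real class):

    import attr
    from frozendict import frozendict

    @attr.s(slots=True, frozen=True)
    class TokenSketch:
        stream = attr.ib(type=int)
        instance_map = attr.ib(factory=frozendict)

    ok = TokenSketch(stream=5, instance_map=frozendict({"worker1": 7}))
    hash(ok)  # fine: frozendict is hashable

    bad = TokenSketch(stream=5, instance_map={"worker1": 7})
    # hash(bad) raises TypeError: unhashable type: 'dict'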
---
changelog.d/10827.bugfix | 1 +
synapse/storage/databases/main/stream.py | 4 +++-
synapse/types.py | 20 +++++++++++++++-----
3 files changed, 19 insertions(+), 6 deletions(-)
create mode 100644 changelog.d/10827.bugfix
diff --git a/changelog.d/10827.bugfix b/changelog.d/10827.bugfix
new file mode 100644
index 0000000000..11a618bf82
--- /dev/null
+++ b/changelog.d/10827.bugfix
@@ -0,0 +1 @@
+Fix error in deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters.
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 959f13de47..9a3b6f4acf 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -39,6 +39,8 @@ import logging
from collections import namedtuple
from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set, Tuple
+from frozendict import frozendict
+
from twisted.internet import defer
from synapse.api.filtering import Filter
@@ -379,7 +381,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta):
if p > min_pos
}
- return RoomStreamToken(None, min_pos, positions)
+ return RoomStreamToken(None, min_pos, frozendict(positions))
async def get_room_events_stream_for_rooms(
self,
diff --git a/synapse/types.py b/synapse/types.py
index 90168ce8fa..ed831a5c1d 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -30,6 +30,7 @@ from typing import (
)
import attr
+from frozendict import frozendict
from signedjson.key import decode_verify_key_bytes
from unpaddedbase64 import decode_base64
from zope.interface import Interface
@@ -457,6 +458,9 @@ class RoomStreamToken:
Note: The `RoomStreamToken` cannot have both a topological part and an
instance map.
+
+ For caching purposes, `RoomStreamToken`s and, by extension, all their
+ attributes must be hashable.
"""
topological = attr.ib(
@@ -466,12 +470,12 @@ class RoomStreamToken:
stream = attr.ib(type=int, validator=attr.validators.instance_of(int))
instance_map = attr.ib(
- type=Dict[str, int],
- factory=dict,
+ type="frozendict[str, int]",
+ factory=frozendict,
validator=attr.validators.deep_mapping(
key_validator=attr.validators.instance_of(str),
value_validator=attr.validators.instance_of(int),
- mapping_validator=attr.validators.instance_of(dict),
+ mapping_validator=attr.validators.instance_of(frozendict),
),
)
@@ -507,7 +511,7 @@ class RoomStreamToken:
return cls(
topological=None,
stream=stream,
- instance_map=instance_map,
+ instance_map=frozendict(instance_map),
)
except Exception:
pass
@@ -540,7 +544,7 @@ class RoomStreamToken:
for instance in set(self.instance_map).union(other.instance_map)
}
- return RoomStreamToken(None, max_stream, instance_map)
+ return RoomStreamToken(None, max_stream, frozendict(instance_map))
def as_historical_tuple(self) -> Tuple[int, int]:
"""Returns a tuple of `(topological, stream)` for historical tokens.
@@ -593,6 +597,12 @@ class RoomStreamToken:
@attr.s(slots=True, frozen=True)
class StreamToken:
+ """A collection of positions within multiple streams.
+
+ For caching purposes, `StreamToken`s, and by extension all their attributes,
+ must be hashable.
+ """
+
room_key = attr.ib(
type=RoomStreamToken, validator=attr.validators.instance_of(RoomStreamToken)
)
--
cgit 1.5.1
From 6fc8be9a1b2046e69e8c6f731442887e3addeec0 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 22 Sep 2021 09:45:20 -0400
Subject: Include more information in oEmbed previews. (#10819)
* Improve titles (fall back to the author name if there's no title) and include the site name.
* Handle photo/video payloads.
* Include the original URL in the Open Graph response.
* Fix the expiration time (by properly converting from seconds to milliseconds).
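As a rough sketch of the resulting mapping (hand-written example values, not real provider output), a "photo" response is now converted along these lines:
oembed = {
    "version": "1.0",
    "type": "photo",
    "url": "http://example.com/photo.jpg",  # hypothetical values
    "author_name": "Alice",                 # no "title" in this response
    "provider_name": "Example",
    "cache_age": 86400,                     # oEmbed expresses this in seconds
}

open_graph = {"og:url": "http://example.com/page"}  # the URL being previewed
title = oembed.get("title") or oembed.get("author_name")
if title:
    open_graph["og:title"] = title          # falls back to "Alice"
if oembed.get("provider_name"):
    open_graph["og:site_name"] = oembed["provider_name"]
if oembed["type"] == "photo":
    open_graph["og:image"] = oembed["url"]  # full image, not the thumbnail

cache_age_ms = oembed["cache_age"] * 1000   # seconds -> milliseconds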
---
changelog.d/10819.feature | 1 +
synapse/rest/media/v1/oembed.py | 49 ++++++++++++++++++++++++---
synapse/rest/media/v1/preview_url_resource.py | 2 +-
tests/rest/media/v1/test_url_preview.py | 30 +++++++++++-----
4 files changed, 68 insertions(+), 14 deletions(-)
create mode 100644 changelog.d/10819.feature
diff --git a/changelog.d/10819.feature b/changelog.d/10819.feature
new file mode 100644
index 0000000000..4fa95a6cc9
--- /dev/null
+++ b/changelog.d/10819.feature
@@ -0,0 +1 @@
+Improve oEmbed previews by processing the author name, photo, and video information.
diff --git a/synapse/rest/media/v1/oembed.py b/synapse/rest/media/v1/oembed.py
index 8b74e72655..e04671fb95 100644
--- a/synapse/rest/media/v1/oembed.py
+++ b/synapse/rest/media/v1/oembed.py
@@ -13,7 +13,7 @@
# limitations under the License.
import logging
import urllib.parse
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, List, Optional
import attr
@@ -22,6 +22,8 @@ from synapse.types import JsonDict
from synapse.util import json_decoder
if TYPE_CHECKING:
+ from lxml import etree
+
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -31,7 +33,7 @@ logger = logging.getLogger(__name__)
class OEmbedResult:
# The Open Graph result (converted from the oEmbed result).
open_graph_result: JsonDict
- # Number of seconds to cache the content, according to the oEmbed response.
+ # Number of milliseconds to cache the content, according to the oEmbed response.
#
# This will be None if no cache-age is provided in the oEmbed response (or
# if the oEmbed response cannot be turned into an Open Graph response).
@@ -119,10 +121,22 @@ class OEmbedProvider:
# Ensure the cache age is None or an int.
cache_age = oembed.get("cache_age")
if cache_age:
- cache_age = int(cache_age)
+ cache_age = int(cache_age) * 1000
# The results.
- open_graph_response = {"og:title": oembed.get("title")}
+ open_graph_response = {
+ "og:url": url,
+ }
+
+ # Use either title or author's name as the title.
+ title = oembed.get("title") or oembed.get("author_name")
+ if title:
+ open_graph_response["og:title"] = title
+
+ # Use the provider name as the site name.
+ provider_name = oembed.get("provider_name")
+ if provider_name:
+ open_graph_response["og:site_name"] = provider_name
# If a thumbnail exists, use it. Note that dimensions will be calculated later.
if "thumbnail_url" in oembed:
@@ -137,6 +151,15 @@ class OEmbedProvider:
# If this is a photo, use the full image, not the thumbnail.
open_graph_response["og:image"] = oembed["url"]
+ elif oembed_type == "video":
+ open_graph_response["og:type"] = "video.other"
+ calc_description_and_urls(open_graph_response, oembed["html"])
+ open_graph_response["og:video:width"] = oembed["width"]
+ open_graph_response["og:video:height"] = oembed["height"]
+
+ elif oembed_type == "link":
+ open_graph_response["og:type"] = "website"
+
else:
raise RuntimeError(f"Unknown oEmbed type: {oembed_type}")
@@ -149,6 +172,14 @@ class OEmbedProvider:
return OEmbedResult(open_graph_response, cache_age)
+def _fetch_urls(tree: "etree.Element", tag_name: str) -> List[str]:
+ results = []
+ for tag in tree.xpath("//*/" + tag_name):
+ if "src" in tag.attrib:
+ results.append(tag.attrib["src"])
+ return results
+
+
def calc_description_and_urls(open_graph_response: JsonDict, html_body: str) -> None:
"""
Calculate description for an HTML document.
@@ -179,6 +210,16 @@ def calc_description_and_urls(open_graph_response: JsonDict, html_body: str) ->
if tree is None:
return
+ # Attempt to find interesting URLs (images, videos, embeds).
+ if "og:image" not in open_graph_response:
+ image_urls = _fetch_urls(tree, "img")
+ if image_urls:
+ open_graph_response["og:image"] = image_urls[0]
+
+ video_urls = _fetch_urls(tree, "video") + _fetch_urls(tree, "embed")
+ if video_urls:
+ open_graph_response["og:video"] = video_urls[0]
+
from synapse.rest.media.v1.preview_url_resource import _calc_description
description = _calc_description(tree)
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 0a0b476d2b..9ffa983fbb 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -305,7 +305,7 @@ class PreviewUrlResource(DirectServeJsonResource):
with open(media_info.filename, "rb") as file:
body = file.read()
- oembed_response = self._oembed.parse_oembed_response(media_info.uri, body)
+ oembed_response = self._oembed.parse_oembed_response(url, body)
og = oembed_response.open_graph_result
# Use the cache age from the oEmbed result, instead of the HTTP response.
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index 9d13899584..d83dfacfed 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -620,11 +620,12 @@ class URLPreviewTests(unittest.HomeserverTestCase):
self.assertIn(b"/matrixdotorg", server.data)
self.assertEqual(channel.code, 200)
- self.assertIsNone(channel.json_body["og:title"])
- self.assertTrue(channel.json_body["og:image"].startswith("mxc://"))
- self.assertEqual(channel.json_body["og:image:height"], 1)
- self.assertEqual(channel.json_body["og:image:width"], 1)
- self.assertEqual(channel.json_body["og:image:type"], "image/png")
+ body = channel.json_body
+ self.assertEqual(body["og:url"], "http://twitter.com/matrixdotorg/status/12345")
+ self.assertTrue(body["og:image"].startswith("mxc://"))
+ self.assertEqual(body["og:image:height"], 1)
+ self.assertEqual(body["og:image:width"], 1)
+ self.assertEqual(body["og:image:type"], "image/png")
def test_oembed_rich(self):
"""Test an oEmbed endpoint which returns HTML content via the 'rich' type."""
@@ -633,6 +634,8 @@ class URLPreviewTests(unittest.HomeserverTestCase):
result = {
"version": "1.0",
"type": "rich",
+ # Note that this provides the author, not the title.
+ "author_name": "Alice",
"html": "
Content Preview
",
}
end_content = json.dumps(result).encode("utf-8")
@@ -660,9 +663,14 @@ class URLPreviewTests(unittest.HomeserverTestCase):
self.pump()
self.assertEqual(channel.code, 200)
+ body = channel.json_body
self.assertEqual(
- channel.json_body,
- {"og:title": None, "og:description": "Content Preview"},
+ body,
+ {
+ "og:url": "http://twitter.com/matrixdotorg/status/12345",
+ "og:title": "Alice",
+ "og:description": "Content Preview",
+ },
)
def test_oembed_format(self):
@@ -705,7 +713,11 @@ class URLPreviewTests(unittest.HomeserverTestCase):
self.assertIn(b"format=json", server.data)
self.assertEqual(channel.code, 200)
+ body = channel.json_body
self.assertEqual(
- channel.json_body,
- {"og:title": None, "og:description": "Content Preview"},
+ body,
+ {
+ "og:url": "http://www.hulu.com/watch/12345",
+ "og:description": "Content Preview",
+ },
)
--
cgit 1.5.1
From 8f2a52766bc242c02a309f45406f827e670311e7 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 22 Sep 2021 15:20:18 +0100
Subject: Ensure we mark sent knocks as outliers (#10873)
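For reference, the net effect on the sent knock event (condensed from the diff below):
# We don't yet have the state at this point in the DAG, so store the
# sent knock as an outlier ...
event.internal_metadata.outlier = True
# ... but flag it so that /sync still sends it to clients.
event.internal_metadata.out_of_band_membership = True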
---
changelog.d/10873.bugfix | 1 +
synapse/handlers/federation.py | 7 +++++++
2 files changed, 8 insertions(+)
create mode 100644 changelog.d/10873.bugfix
diff --git a/changelog.d/10873.bugfix b/changelog.d/10873.bugfix
new file mode 100644
index 0000000000..32b2e50fd9
--- /dev/null
+++ b/changelog.d/10873.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.37.0 which caused `knock` events which we sent to remote servers to be incorrectly stored in the local database.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 8e2cf3387a..a03d77dffd 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -593,6 +593,13 @@ class FederationHandler(BaseHandler):
target_hosts, room_id, knockee, Membership.KNOCK, content, params=params
)
+ # Mark the knock as an outlier as we don't yet have the state at this point in
+ # the DAG.
+ event.internal_metadata.outlier = True
+
+ # ... but tell /sync to send it to clients anyway.
+ event.internal_metadata.out_of_band_membership = True
+
# Record the room ID and its version so that we have a record of the room
await self._maybe_store_room_on_outlier_membership(
room_id=event.room_id, room_version=event_format_version
--
cgit 1.5.1
From 03db6701d5379f4aa05037bd9ce23942c501874e Mon Sep 17 00:00:00 2001
From: Tulir Asokan
Date: Wed, 22 Sep 2021 10:31:05 -0400
Subject: Fix invalidating OTK count cache after claim (#10875)
The invalidation was missing in `_claim_e2e_one_time_key_returning`,
which is used on SQLite 3.24+ and Postgres. This could break e2ee if
nothing else happened to invalidate the caches before the keys ran out.
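A toy sketch of the failure mode (hypothetical names, not the real storage API): once the claimed key row is deleted, the cached per-device count must be dropped as well, or the server keeps advertising keys that no longer exist.
otk_count_cache = {("@alice:example.org", "DEVICE1"): 1}  # one key left, says the cache

def claim_one_time_key(user_id: str, device_id: str) -> None:
    # ... the claimed key row is DELETEd from the database here ...
    # The fix: drop the cached count in the same transaction, mirroring
    # the invalidation the non-RETURNING code path already performed.
    otk_count_cache.pop((user_id, device_id), None)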
Signed-off-by: Tulir Asokan
---
changelog.d/10875.bugfix | 1 +
synapse/storage/databases/main/end_to_end_keys.py | 4 ++++
2 files changed, 5 insertions(+)
create mode 100644 changelog.d/10875.bugfix
diff --git a/changelog.d/10875.bugfix b/changelog.d/10875.bugfix
new file mode 100644
index 0000000000..6f370da5c7
--- /dev/null
+++ b/changelog.d/10875.bugfix
@@ -0,0 +1 @@
+Fix invalidating one-time key count cache after claiming keys. Contributed by Tulir at Beeper.
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 1f0a39eac4..a95ac34f09 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -824,6 +824,10 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore):
if otk_row is None:
return None
+ self._invalidate_cache_and_stream(
+ txn, self.count_e2e_one_time_keys, (user_id, device_id)
+ )
+
key_id, key_json = otk_row
return f"{algorithm}:{key_id}", key_json
--
cgit 1.5.1
From f78b68a96b1f179043b38b4109e09fa0a315643d Mon Sep 17 00:00:00 2001
From: Hillery Shay
Date: Wed, 22 Sep 2021 08:25:26 -0700
Subject: Treat "\u0000" as "\u0020" for the purposes of message search
(message indexing) (#10820)
* add test to check if null code points are being inserted
* add logic to detect and replace null code points before insertion into db
* lints
* add license to test
* change approach to null substitution
* add type hint for SearchEntry
* Add changelog entry
Signed-off-by: H.Shay
* updated changelog
* update changelog message
* remove duplicate changelog
* Update synapse/storage/databases/main/events.py remove extra space
Co-authored-by: Patrick Cloke
* rename and move test file, update tests, delete old test file
* fix typo in comments
* update _find_highlights_in_postgres to replace null byte with space
* replace null byte in sqlite search insertion
* beef up and reorganize test for this pr
* update changelog
* add type hints and update docstring
* check db engine directly vs using env variable
* refactor tests to be less repetitive
* move replace logic into a separate function
* requested changes
* Fix typo.
* Update synapse/storage/databases/main/search.py
Co-authored-by: reivilibre
* Update changelog.d/10820.misc
Co-authored-by: Aaron Raimist
Co-authored-by: Patrick Cloke
Co-authored-by: reivilibre
Co-authored-by: Aaron Raimist
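The heart of the change is one substitution, applied at each insertion point and to the Postgres highlight query:
def _clean_value_for_search(value: str) -> str:
    # Neither Postgres nor SQLite accepts NUL (\u0000) in values headed
    # for the full-text search tables, so treat it as a space instead.
    return value.replace("\u0000", " ")

assert _clean_value_for_search("hi\u0000bob") == "hi bob"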
---
changelog.d/10820.misc | 1 +
synapse/storage/databases/main/search.py | 34 +++++++++++----
tests/storage/test_room_search.py | 74 ++++++++++++++++++++++++++++++++
3 files changed, 100 insertions(+), 9 deletions(-)
create mode 100644 changelog.d/10820.misc
create mode 100644 tests/storage/test_room_search.py
diff --git a/changelog.d/10820.misc b/changelog.d/10820.misc
new file mode 100644
index 0000000000..4373bf6f6b
--- /dev/null
+++ b/changelog.d/10820.misc
@@ -0,0 +1 @@
+Fix a long-standing bug where an `m.room.message` event containing a null byte would cause an internal server error.
\ No newline at end of file
diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index 6480d5a9f5..2a1e99e17a 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -15,12 +15,12 @@
import logging
import re
from collections import namedtuple
-from typing import Collection, List, Optional, Set
+from typing import Collection, Iterable, List, Optional, Set
from synapse.api.errors import SynapseError
from synapse.events import EventBase
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
-from synapse.storage.database import DatabasePool
+from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
@@ -32,14 +32,24 @@ SearchEntry = namedtuple(
)
+def _clean_value_for_search(value: str) -> str:
+ """
+ Replaces any null code points in the string with spaces as
+ Postgres and SQLite do not like the insertion of strings with
+ null code points into the full-text search tables.
+ """
+ return value.replace("\u0000", " ")
+
+
class SearchWorkerStore(SQLBaseStore):
- def store_search_entries_txn(self, txn, entries):
+ def store_search_entries_txn(
+ self, txn: LoggingTransaction, entries: Iterable[SearchEntry]
+ ) -> None:
"""Add entries to the search table
Args:
- txn (cursor):
- entries (iterable[SearchEntry]):
- entries to be added to the table
+ txn:
+ entries: entries to be added to the table
"""
if not self.hs.config.enable_search:
return
@@ -55,7 +65,7 @@ class SearchWorkerStore(SQLBaseStore):
entry.event_id,
entry.room_id,
entry.key,
- entry.value,
+ _clean_value_for_search(entry.value),
entry.stream_ordering,
entry.origin_server_ts,
)
@@ -70,11 +80,16 @@ class SearchWorkerStore(SQLBaseStore):
" VALUES (?,?,?,?)"
)
args = (
- (entry.event_id, entry.room_id, entry.key, entry.value)
+ (
+ entry.event_id,
+ entry.room_id,
+ entry.key,
+ _clean_value_for_search(entry.value),
+ )
for entry in entries
)
-
txn.execute_batch(sql, args)
+
else:
# This should be unreachable.
raise Exception("Unrecognized database engine")
@@ -646,6 +661,7 @@ class SearchStore(SearchBackgroundUpdateStore):
for key in ("body", "name", "topic"):
v = event.content.get(key, None)
if v:
+ v = _clean_value_for_search(v)
values.append(v)
if not values:
diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py
new file mode 100644
index 0000000000..8971ecccbd
--- /dev/null
+++ b/tests/storage/test_room_search.py
@@ -0,0 +1,74 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import synapse.rest.admin
+from synapse.rest.client import login, room
+from synapse.storage.engines import PostgresEngine
+
+from tests.unittest import HomeserverTestCase
+
+
+class NullByteInsertionTest(HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def test_null_byte(self):
+ """
+ Postgres/SQLite don't like null bytes going into the search tables. Internally
+ we replace those with a space.
+
+ Ensure this doesn't break anything.
+ """
+
+ # Register a user and create a room, create some messages
+ self.register_user("alice", "password")
+ access_token = self.login("alice", "password")
+ room_id = self.helper.create_room_as("alice", tok=access_token)
+
+ # Send messages and ensure they don't cause an internal server
+ # error
+ for body in ["hi\u0000bob", "another message", "hi alice"]:
+ response = self.helper.send(room_id, body, tok=access_token)
+ self.assertIn("event_id", response)
+
+ # Check that search works for the message where the null byte was replaced
+ store = self.hs.get_datastore()
+ result = self.get_success(
+ store.search_msgs([room_id], "hi bob", ["content.body"])
+ )
+ self.assertEquals(result.get("count"), 1)
+ if isinstance(store.database_engine, PostgresEngine):
+ self.assertIn("hi", result.get("highlights"))
+ self.assertIn("bob", result.get("highlights"))
+
+ # Check that search works for an unrelated message
+ result = self.get_success(
+ store.search_msgs([room_id], "another", ["content.body"])
+ )
+ self.assertEquals(result.get("count"), 1)
+ if isinstance(store.database_engine, PostgresEngine):
+ self.assertIn("another", result.get("highlights"))
+
+ # Check that search works for a search term that overlaps with the message
+ # containing a null byte and an unrelated message.
+ result = self.get_success(store.search_msgs([room_id], "hi", ["content.body"]))
+ self.assertEquals(result.get("count"), 2)
+ result = self.get_success(
+ store.search_msgs([room_id], "hi alice", ["content.body"])
+ )
+ if isinstance(store.database_engine, PostgresEngine):
+ self.assertIn("alice", result.get("highlights"))
--
cgit 1.5.1
From 26f2bfedbf5493d8a69d1b38147b6236e7606cd3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 22 Sep 2021 17:58:57 +0100
Subject: Factor out a separate `EventContext.for_outlier` (#10883)
Constructing an EventContext for an outlier is actually really simple, and
there's no sense in going via an `async` method in the `StateHandler`.
This also means that we can resolve a bunch of FIXMEs.
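With this change, building an outlier's context no longer needs the `StateHandler` at all; it is just:
# An outlier has no state, so both state maps are empty and no state
# group is created:
context = EventContext.for_outlier()
# equivalent to EventContext(current_state_ids={}, prev_state_ids={})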
---
changelog.d/10883.misc | 1 +
synapse/events/snapshot.py | 14 ++++++++++----
synapse/handlers/federation.py | 9 ++++-----
synapse/handlers/federation_event.py | 7 ++-----
synapse/state/__init__.py | 34 ++++------------------------------
5 files changed, 21 insertions(+), 44 deletions(-)
create mode 100644 changelog.d/10883.misc
diff --git a/changelog.d/10883.misc b/changelog.d/10883.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/10883.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index f8d898c3b1..5ba01eeef9 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -80,9 +80,7 @@ class EventContext:
(type, state_key) -> event_id
- FIXME: what is this for an outlier? it seems ill-defined. It seems like
- it could be either {}, or the state we were given by the remote
- server, depending on $THINGS
+ For an outlier, this is {}
Note that this is a private attribute: it should be accessed via
``get_current_state_ids``. _AsyncEventContext impl calculates this
@@ -96,7 +94,7 @@ class EventContext:
(type, state_key) -> event_id
- FIXME: again, what is this for an outlier?
+ For an outlier, this is {}
As with _current_state_ids, this is a private attribute. It should be
accessed via get_prev_state_ids.
@@ -130,6 +128,14 @@ class EventContext:
delta_ids=delta_ids,
)
+ @staticmethod
+ def for_outlier():
+ """Return an EventContext instance suitable for persisting an outlier event"""
+ return EventContext(
+ current_state_ids={},
+ prev_state_ids={},
+ )
+
async def serialize(self, event: EventBase, store: "DataStore") -> dict:
"""Converts self to a type that can be serialized as JSON, and then
deserialized by `deserialize`
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index a03d77dffd..0befe9ce43 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -624,7 +624,7 @@ class FederationHandler(BaseHandler):
# in the invitee's sync stream. It is stripped out for all other local users.
event.unsigned["knock_room_state"] = stripped_room_state["knock_state_events"]
- context = await self.state_handler.compute_event_context(event)
+ context = EventContext.for_outlier()
stream_id = await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
)
@@ -814,7 +814,7 @@ class FederationHandler(BaseHandler):
)
)
- context = await self.state_handler.compute_event_context(event)
+ context = EventContext.for_outlier()
await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
)
@@ -843,7 +843,7 @@ class FederationHandler(BaseHandler):
await self.federation_client.send_leave(host_list, event)
- context = await self.state_handler.compute_event_context(event)
+ context = EventContext.for_outlier()
stream_id = await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
)
@@ -1115,8 +1115,7 @@ class FederationHandler(BaseHandler):
events_to_context = {}
for e in itertools.chain(auth_events, state):
e.internal_metadata.outlier = True
- ctx = await self.state_handler.compute_event_context(e)
- events_to_context[e.event_id] = ctx
+ events_to_context[e.event_id] = EventContext.for_outlier()
event_map = {
e.event_id: e for e in itertools.chain(auth_events, state, [event])
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 3b95beeb08..10b3fdc222 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -1221,7 +1221,7 @@ class FederationEventHandler:
async def prep(ev_info: _NewEventInfo) -> EventContext:
event = ev_info.event
with nested_logging_context(suffix=event.event_id):
- res = await self._state_handler.compute_event_context(event)
+ res = EventContext.for_outlier()
res = await self._check_event_auth(
origin,
event,
@@ -1540,10 +1540,7 @@ class FederationEventHandler:
event.event_id,
auth_event.event_id,
)
- missing_auth_event_context = (
- await self._state_handler.compute_event_context(auth_event)
- )
-
+ missing_auth_event_context = EventContext.for_outlier()
missing_auth_event_context = await self._check_event_auth(
origin,
auth_event,
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 463ce58dae..c981df3f18 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -263,7 +263,9 @@ class StateHandler:
async def compute_event_context(
self, event: EventBase, old_state: Optional[Iterable[EventBase]] = None
) -> EventContext:
- """Build an EventContext structure for the event.
+ """Build an EventContext structure for a non-outlier event.
+
+ (for an outlier, call EventContext.for_outlier directly)
This works out what the current state should be for the event, and
generates a new state group if necessary.
@@ -278,35 +280,7 @@ class StateHandler:
The event context.
"""
- if event.internal_metadata.is_outlier():
- # If this is an outlier, then we know it shouldn't have any current
- # state. Certainly store.get_current_state won't return any, and
- # persisting the event won't store the state group.
-
- # FIXME: why do we populate current_state_ids? I thought the point was
- # that we weren't supposed to have any state for outliers?
- if old_state:
- prev_state_ids = {(s.type, s.state_key): s.event_id for s in old_state}
- if event.is_state():
- current_state_ids = dict(prev_state_ids)
- key = (event.type, event.state_key)
- current_state_ids[key] = event.event_id
- else:
- current_state_ids = prev_state_ids
- else:
- current_state_ids = {}
- prev_state_ids = {}
-
- # We don't store state for outliers, so we don't generate a state
- # group for it.
- context = EventContext.with_state(
- state_group=None,
- state_group_before_event=None,
- current_state_ids=current_state_ids,
- prev_state_ids=prev_state_ids,
- )
-
- return context
+ assert not event.internal_metadata.is_outlier()
#
# first of all, figure out the state before the event
--
cgit 1.5.1
From aa2c027792d04c36b17866710e95a41d31f5d99c Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 23 Sep 2021 11:59:07 +0100
Subject: Remove unnecessary parentheses around tuples returned from methods
(#10889)
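The change is purely cosmetic; both forms construct the same tuple, as a minimal illustration shows:
def gc_thresholds_old() -> tuple:
    return (700, 10, 10)  # parentheses are redundant here

def gc_thresholds_new() -> tuple:
    return 700, 10, 10    # identical tuple, as changed throughout below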
---
changelog.d/10889.misc | 1 +
synapse/config/server.py | 2 +-
synapse/federation/sender/per_destination_queue.py | 4 ++--
synapse/handlers/federation.py | 2 +-
synapse/handlers/message.py | 4 ++--
synapse/handlers/receipts.py | 4 ++--
synapse/handlers/room.py | 2 +-
synapse/handlers/room_summary.py | 2 +-
synapse/handlers/typing.py | 4 ++--
synapse/http/matrixfederationclient.py | 2 +-
synapse/rest/admin/rooms.py | 4 ++--
synapse/rest/client/devices.py | 4 ++--
synapse/rest/client/password_policy.py | 4 ++--
synapse/storage/databases/main/account_data.py | 2 +-
synapse/storage/databases/main/deviceinbox.py | 6 +++---
synapse/storage/databases/main/events_worker.py | 2 +-
synapse/storage/databases/main/state_deltas.py | 2 +-
synapse/storage/databases/main/stream.py | 4 ++--
synapse/streams/config.py | 2 +-
synapse/types.py | 4 ++--
tests/test_state.py | 2 +-
tests/utils.py | 2 +-
22 files changed, 33 insertions(+), 32 deletions(-)
create mode 100644 changelog.d/10889.misc
diff --git a/changelog.d/10889.misc b/changelog.d/10889.misc
new file mode 100644
index 0000000000..6d60188f55
--- /dev/null
+++ b/changelog.d/10889.misc
@@ -0,0 +1 @@
+Clean up some unnecessary parentheses in places around the codebase.
\ No newline at end of file
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 7b9109a592..ad8715da29 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -1447,7 +1447,7 @@ def read_gc_thresholds(thresholds):
return None
try:
assert len(thresholds) == 3
- return (int(thresholds[0]), int(thresholds[1]), int(thresholds[2]))
+ return int(thresholds[0]), int(thresholds[1]), int(thresholds[2])
except Exception:
raise ConfigError(
"Value of `gc_threshold` must be a list of three integers if set"
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index c11d1f6d31..afe35e72b6 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -560,7 +560,7 @@ class PerDestinationQueue:
assert len(edus) <= limit, "get_device_updates_by_remote returned too many EDUs"
- return (edus, now_stream_id)
+ return edus, now_stream_id
async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]:
last_device_stream_id = self._last_device_stream_id
@@ -593,7 +593,7 @@ class PerDestinationQueue:
stream_id,
)
- return (edus, stream_id)
+ return edus, stream_id
def _start_catching_up(self) -> None:
"""
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 0befe9ce43..4523b25636 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1369,7 +1369,7 @@ class FederationHandler(BaseHandler):
builder=builder
)
EventValidator().validate_new(event, self.config)
- return (event, context)
+ return event, context
async def _check_signature(self, event: EventBase, context: EventContext) -> None:
"""
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 6cd694b2da..7a5d8e6f4e 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -666,7 +666,7 @@ class EventCreationHandler:
self.validator.validate_new(event, self.config)
- return (event, context)
+ return event, context
async def _is_exempt_from_privacy_policy(
self, builder: EventBuilder, requester: Requester
@@ -1004,7 +1004,7 @@ class EventCreationHandler:
logger.debug("Created event %s", event.event_id)
- return (event, context)
+ return event, context
@measure_func("handle_new_client_event")
async def handle_new_client_event(
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 5881f09ebd..f21f33ada2 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -238,7 +238,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
if self.config.experimental.msc2285_enabled:
events = ReceiptEventSource.filter_out_hidden(events, user.to_string())
- return (events, to_key)
+ return events, to_key
async def get_new_events_as(
self, from_key: int, service: ApplicationService
@@ -270,7 +270,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
events.append(event)
- return (events, to_key)
+ return events, to_key
def get_current_key(self, direction: str = "f") -> int:
return self.store.get_max_receipt_stream_id()
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 287ea2fd06..b5768220d9 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -1235,7 +1235,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
else:
end_key = to_key
- return (events, end_key)
+ return events, end_key
def get_current_key(self) -> RoomStreamToken:
return self.store.get_room_max_token()
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
index 4e28fb9685..fb26ee7ad7 100644
--- a/synapse/handlers/room_summary.py
+++ b/synapse/handlers/room_summary.py
@@ -1179,4 +1179,4 @@ def _child_events_comparison_key(
order = None
# Items without an order come last.
- return (order is None, order, child.origin_server_ts, child.room_id)
+ return order is None, order, child.origin_server_ts, child.room_id
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 9326330c90..d10e9b8ec4 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -483,7 +483,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]):
events.append(self._make_event_for(room_id))
- return (events, handler._latest_room_serial)
+ return events, handler._latest_room_serial
async def get_new_events(
self,
@@ -507,7 +507,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]):
events.append(self._make_event_for(room_id))
- return (events, handler._latest_room_serial)
+ return events, handler._latest_room_serial
def get_current_key(self) -> int:
return self.get_typing_handler()._latest_room_serial
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index ef10ec0937..e56fa477bb 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -1186,7 +1186,7 @@ class MatrixFederationHttpClient:
request.method,
request.uri.decode("ascii"),
)
- return (length, headers)
+ return length, headers
def _flatten_response_never_received(e):
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index 8f781f745f..a4823ca6e7 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -213,7 +213,7 @@ class RoomRestServlet(RestServlet):
members = await self.store.get_users_in_room(room_id)
ret["joined_local_devices"] = await self.store.count_devices_by_users(members)
- return (200, ret)
+ return 200, ret
async def on_DELETE(
self, request: SynapseRequest, room_id: str
@@ -668,4 +668,4 @@ async def _delete_room(
if purge:
await pagination_handler.purge_room(room_id, force=force_purge)
- return (200, ret)
+ return 200, ret
diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py
index 25bc3c8f47..8566dc5cb5 100644
--- a/synapse/rest/client/devices.py
+++ b/synapse/rest/client/devices.py
@@ -211,7 +211,7 @@ class DehydratedDeviceServlet(RestServlet):
if dehydrated_device is not None:
(device_id, device_data) = dehydrated_device
result = {"device_id": device_id, "device_data": device_data}
- return (200, result)
+ return 200, result
else:
raise errors.NotFoundError("No dehydrated device available")
@@ -293,7 +293,7 @@ class ClaimDehydratedDeviceServlet(RestServlet):
submission["device_id"],
)
- return (200, result)
+ return 200, result
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
diff --git a/synapse/rest/client/password_policy.py b/synapse/rest/client/password_policy.py
index 6d64efb165..0465fd2292 100644
--- a/synapse/rest/client/password_policy.py
+++ b/synapse/rest/client/password_policy.py
@@ -40,7 +40,7 @@ class PasswordPolicyServlet(RestServlet):
def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
if not self.enabled or not self.policy:
- return (200, {})
+ return 200, {}
policy = {}
@@ -54,7 +54,7 @@ class PasswordPolicyServlet(RestServlet):
if param in self.policy:
policy["m.%s" % param] = self.policy[param]
- return (200, policy)
+ return 200, policy
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index d0cf3460da..70ca3e09f7 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -324,7 +324,7 @@ class AccountDataWorkerStore(SQLBaseStore):
user_id, int(stream_id)
)
if not changed:
- return ({}, {})
+ return {}, {}
return await self.db_pool.runInteraction(
"get_updated_account_data_for_user", get_updated_account_data_for_user_txn
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index c55508867d..3154906d45 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -136,7 +136,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
user_id, last_stream_id
)
if not has_changed:
- return ([], current_stream_id)
+ return [], current_stream_id
def get_new_messages_for_device_txn(txn):
sql = (
@@ -240,11 +240,11 @@ class DeviceInboxWorkerStore(SQLBaseStore):
)
if not has_changed or last_stream_id == current_stream_id:
log_kv({"message": "No new messages in stream"})
- return ([], current_stream_id)
+ return [], current_stream_id
if limit <= 0:
# This can happen if we run out of room for EDUs in the transaction.
- return ([], last_stream_id)
+ return [], last_stream_id
@trace
def get_new_messages_for_remote_destination_txn(txn):
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index d72e716b5c..4a1a2f4a6a 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -1495,7 +1495,7 @@ class EventsWorkerStore(SQLBaseStore):
if not res:
raise SynapseError(404, "Could not find event %s" % (event_id,))
- return (int(res["topological_ordering"]), int(res["stream_ordering"]))
+ return int(res["topological_ordering"]), int(res["stream_ordering"])
async def get_next_event_to_expire(self) -> Optional[Tuple[str, int]]:
"""Retrieve the entry with the lowest expiry timestamp in the event_expiry
diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py
index bff7d0404f..a89747d741 100644
--- a/synapse/storage/databases/main/state_deltas.py
+++ b/synapse/storage/databases/main/state_deltas.py
@@ -58,7 +58,7 @@ class StateDeltasStore(SQLBaseStore):
# if the CSDs haven't changed between prev_stream_id and now, we
# know for certain that they haven't changed between prev_stream_id and
# max_stream_id.
- return (max_stream_id, [])
+ return max_stream_id, []
def get_current_state_deltas_txn(txn):
# First we calculate the max stream id that will give us less than
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 9a3b6f4acf..dc7884b1c0 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -624,7 +624,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta):
self._set_before_and_after(events, rows)
- return (events, token)
+ return events, token
async def get_recent_event_ids_for_room(
self, room_id: str, limit: int, end_token: RoomStreamToken
@@ -1242,7 +1242,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta):
self._set_before_and_after(events, rows)
- return (events, token)
+ return events, token
@cached()
async def get_id_for_instance(self, instance_name: str) -> int:
diff --git a/synapse/streams/config.py b/synapse/streams/config.py
index cf4005984b..c08d591f29 100644
--- a/synapse/streams/config.py
+++ b/synapse/streams/config.py
@@ -81,7 +81,7 @@ class PaginationConfig:
raise SynapseError(400, "Invalid request.")
def __repr__(self) -> str:
- return ("PaginationConfig(from_tok=%r, to_tok=%r, direction=%r, limit=%r)") % (
+ return "PaginationConfig(from_tok=%r, to_tok=%r, direction=%r, limit=%r)" % (
self.from_token,
self.to_token,
self.direction,
diff --git a/synapse/types.py b/synapse/types.py
index ed831a5c1d..364ecf7d45 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -556,7 +556,7 @@ class RoomStreamToken:
"Cannot call `RoomStreamToken.as_historical_tuple` on live token"
)
- return (self.topological, self.stream)
+ return self.topological, self.stream
def get_stream_pos_for_instance(self, instance_name: str) -> int:
"""Get the stream position that the given writer was at at this token.
@@ -766,7 +766,7 @@ def get_verify_key_from_cross_signing_key(key_info):
raise ValueError("Invalid key")
# and return that one key
for key_id, key_data in keys.items():
- return (key_id, decode_verify_key_bytes(key_id, decode_base64(key_data)))
+ return key_id, decode_verify_key_bytes(key_id, decode_base64(key_data))
@attr.s(auto_attribs=True, frozen=True, slots=True)
diff --git a/tests/test_state.py b/tests/test_state.py
index e5488df1ac..76e0e8ca7f 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -106,7 +106,7 @@ class StateGroupStore:
}
async def get_state_group_delta(self, name):
- return (None, None)
+ return None, None
def register_events(self, events):
for e in events:
diff --git a/tests/utils.py b/tests/utils.py
index f3458ca88d..cf8ba5c5db 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -434,7 +434,7 @@ class MockHttpResource:
)
return code, response
except CodeMessageException as e:
- return (e.code, cs_error(e.msg, code=e.errcode))
+ return e.code, cs_error(e.msg, code=e.errcode)
raise KeyError("No event can handle %s" % path)
--
cgit 1.5.1
From e584534403b55ad3f250f92592e30b15b01f0201 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 23 Sep 2021 07:13:34 -0400
Subject: Use direct references for some configuration variables (part 3)
(#10885)
This avoids the overhead of searching through the various
configuration classes by directly referencing the class that
the attributes are in.
It also improves type hints since mypy can now resolve the
types of the configuration variables.
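A before/after of the access pattern being migrated (using an attribute that appears in the diff below):
# before: attribute resolved by searching across the config classes (opaque to mypy)
app_name = hs.config.email_app_name
# after: direct reference to the owning class, so mypy can resolve the type
app_name = hs.config.email.email_app_name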
---
changelog.d/10885.misc | 1 +
synapse/app/homeserver.py | 2 +-
synapse/config/consent.py | 9 +++--
synapse/handlers/account_validity.py | 2 +-
synapse/handlers/appservice.py | 2 +-
synapse/handlers/auth.py | 22 ++++++------
synapse/handlers/cas.py | 8 ++---
synapse/handlers/identity.py | 12 +++----
synapse/handlers/message.py | 4 +--
synapse/handlers/password_policy.py | 4 +--
synapse/handlers/register.py | 11 +++---
synapse/handlers/ui_auth/checkers.py | 17 +++++----
synapse/module_api/__init__.py | 8 +++--
synapse/push/pusher.py | 2 +-
synapse/rest/admin/users.py | 4 +--
synapse/rest/client/account.py | 40 +++++++++++-----------
synapse/rest/client/auth.py | 10 +++---
synapse/rest/client/login.py | 4 +--
synapse/rest/client/password_policy.py | 4 +--
synapse/rest/client/register.py | 30 ++++++++--------
synapse/rest/consent/consent_resource.py | 9 ++---
synapse/rest/synapse/client/password_reset.py | 10 +++---
synapse/server_notices/consent_server_notices.py | 11 ++++--
synapse/storage/databases/main/appservice.py | 2 +-
.../storage/databases/main/monthly_active_users.py | 2 +-
synapse/storage/databases/main/registration.py | 2 +-
synapse/storage/prepare_database.py | 2 +-
synapse/storage/schema/main/delta/30/as_users.py | 2 +-
tests/rest/admin/test_room.py | 2 +-
tests/rest/client/test_login.py | 2 +-
tests/storage/test_appservice.py | 14 +++-----
tests/storage/test_cleanup_extrems.py | 2 +-
32 files changed, 137 insertions(+), 119 deletions(-)
create mode 100644 changelog.d/10885.misc
diff --git a/changelog.d/10885.misc b/changelog.d/10885.misc
new file mode 100644
index 0000000000..586a0b3a96
--- /dev/null
+++ b/changelog.d/10885.misc
@@ -0,0 +1 @@
+Use direct references to config flags.
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index b909f8db8d..886e291e4c 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -195,7 +195,7 @@ class SynapseHomeServer(HomeServer):
}
)
- if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
from synapse.rest.synapse.client.password_reset import (
PasswordResetSubmitTokenResource,
)
diff --git a/synapse/config/consent.py b/synapse/config/consent.py
index b05a9bd97f..ecc43b08b9 100644
--- a/synapse/config/consent.py
+++ b/synapse/config/consent.py
@@ -13,6 +13,7 @@
# limitations under the License.
from os import path
+from typing import Optional
from synapse.config import ConfigError
@@ -78,8 +79,8 @@ class ConsentConfig(Config):
def __init__(self, *args):
super().__init__(*args)
- self.user_consent_version = None
- self.user_consent_template_dir = None
+ self.user_consent_version: Optional[str] = None
+ self.user_consent_template_dir: Optional[str] = None
self.user_consent_server_notice_content = None
self.user_consent_server_notice_to_guests = False
self.block_events_without_consent_error = None
@@ -94,7 +95,9 @@ class ConsentConfig(Config):
return
self.user_consent_version = str(consent_config["version"])
self.user_consent_template_dir = self.abspath(consent_config["template_dir"])
- if not path.isdir(self.user_consent_template_dir):
+ if not isinstance(self.user_consent_template_dir, str) or not path.isdir(
+ self.user_consent_template_dir
+ ):
raise ConfigError(
"Could not find template directory '%s'"
% (self.user_consent_template_dir,)
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 4724565ba5..5a5f124ddf 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -47,7 +47,7 @@ class AccountValidityHandler:
self.send_email_handler = self.hs.get_send_email_handler()
self.clock = self.hs.get_clock()
- self._app_name = self.hs.config.email_app_name
+ self._app_name = self.hs.config.email.email_app_name
self._account_validity_enabled = (
hs.config.account_validity.account_validity_enabled
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index b7213b67a5..163278708c 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -52,7 +52,7 @@ class ApplicationServicesHandler:
self.scheduler = hs.get_application_service_scheduler()
self.started_scheduler = False
self.clock = hs.get_clock()
- self.notify_appservices = hs.config.notify_appservices
+ self.notify_appservices = hs.config.appservice.notify_appservices
self.event_sources = hs.get_event_sources()
self.current_max = 0
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index bcd4249e09..b747f80bc1 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -210,15 +210,15 @@ class AuthHandler(BaseHandler):
self.password_providers = [
PasswordProvider.load(module, config, account_handler)
- for module, config in hs.config.password_providers
+ for module, config in hs.config.authproviders.password_providers
]
logger.info("Extra password_providers: %s", self.password_providers)
self.hs = hs # FIXME better possibility to access registrationHandler later?
self.macaroon_gen = hs.get_macaroon_generator()
- self._password_enabled = hs.config.password_enabled
- self._password_localdb_enabled = hs.config.password_localdb_enabled
+ self._password_enabled = hs.config.auth.password_enabled
+ self._password_localdb_enabled = hs.config.auth.password_localdb_enabled
# start out by assuming PASSWORD is enabled; we will remove it later if not.
login_types = set()
@@ -250,7 +250,7 @@ class AuthHandler(BaseHandler):
)
# The number of seconds to keep a UI auth session active.
- self._ui_auth_session_timeout = hs.config.ui_auth_session_timeout
+ self._ui_auth_session_timeout = hs.config.auth.ui_auth_session_timeout
# Ratelimitier for failed /login attempts
self._failed_login_attempts_ratelimiter = Ratelimiter(
@@ -739,19 +739,19 @@ class AuthHandler(BaseHandler):
return canonical_id
def _get_params_recaptcha(self) -> dict:
- return {"public_key": self.hs.config.recaptcha_public_key}
+ return {"public_key": self.hs.config.captcha.recaptcha_public_key}
def _get_params_terms(self) -> dict:
return {
"policies": {
"privacy_policy": {
- "version": self.hs.config.user_consent_version,
+ "version": self.hs.config.consent.user_consent_version,
"en": {
- "name": self.hs.config.user_consent_policy_name,
+ "name": self.hs.config.consent.user_consent_policy_name,
"url": "%s_matrix/consent?v=%s"
% (
self.hs.config.server.public_baseurl,
- self.hs.config.user_consent_version,
+ self.hs.config.consent.user_consent_version,
),
},
}
@@ -1016,7 +1016,7 @@ class AuthHandler(BaseHandler):
def can_change_password(self) -> bool:
"""Get whether users on this server are allowed to change or set a password.
- Both `config.password_enabled` and `config.password_localdb_enabled` must be true.
+ Both `config.auth.password_enabled` and `config.auth.password_localdb_enabled` must be true.
Note that any account (even SSO accounts) are allowed to add passwords if the above
is true.
@@ -1486,7 +1486,7 @@ class AuthHandler(BaseHandler):
pw = unicodedata.normalize("NFKC", password)
return bcrypt.hashpw(
- pw.encode("utf8") + self.hs.config.password_pepper.encode("utf8"),
+ pw.encode("utf8") + self.hs.config.auth.password_pepper.encode("utf8"),
bcrypt.gensalt(self.bcrypt_rounds),
).decode("ascii")
@@ -1510,7 +1510,7 @@ class AuthHandler(BaseHandler):
pw = unicodedata.normalize("NFKC", password)
return bcrypt.checkpw(
- pw.encode("utf8") + self.hs.config.password_pepper.encode("utf8"),
+ pw.encode("utf8") + self.hs.config.auth.password_pepper.encode("utf8"),
checked_hash,
)
diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py
index b0b188dc78..5d8f6c50a9 100644
--- a/synapse/handlers/cas.py
+++ b/synapse/handlers/cas.py
@@ -65,10 +65,10 @@ class CasHandler:
self._auth_handler = hs.get_auth_handler()
self._registration_handler = hs.get_registration_handler()
- self._cas_server_url = hs.config.cas_server_url
- self._cas_service_url = hs.config.cas_service_url
- self._cas_displayname_attribute = hs.config.cas_displayname_attribute
- self._cas_required_attributes = hs.config.cas_required_attributes
+ self._cas_server_url = hs.config.cas.cas_server_url
+ self._cas_service_url = hs.config.cas.cas_service_url
+ self._cas_displayname_attribute = hs.config.cas.cas_displayname_attribute
+ self._cas_required_attributes = hs.config.cas.cas_required_attributes
self._http_client = hs.get_proxied_http_client()
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 8b8f1f41ca..fe8a995892 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -62,7 +62,7 @@ class IdentityHandler(BaseHandler):
self.federation_http_client = hs.get_federation_http_client()
self.hs = hs
- self._web_client_location = hs.config.invite_client_location
+ self._web_client_location = hs.config.email.invite_client_location
# Ratelimiters for `/requestToken` endpoints.
self._3pid_validation_ratelimiter_ip = Ratelimiter(
@@ -419,7 +419,7 @@ class IdentityHandler(BaseHandler):
token_expires = (
self.hs.get_clock().time_msec()
- + self.hs.config.email_validation_token_lifetime
+ + self.hs.config.email.email_validation_token_lifetime
)
await self.store.start_or_continue_validation_session(
@@ -465,7 +465,7 @@ class IdentityHandler(BaseHandler):
if next_link:
params["next_link"] = next_link
- if self.hs.config.using_identity_server_from_trusted_list:
+ if self.hs.config.email.using_identity_server_from_trusted_list:
# Warn that a deprecated config option is in use
logger.warning(
'The config option "trust_identity_server_for_password_resets" '
@@ -518,7 +518,7 @@ class IdentityHandler(BaseHandler):
if next_link:
params["next_link"] = next_link
- if self.hs.config.using_identity_server_from_trusted_list:
+ if self.hs.config.email.using_identity_server_from_trusted_list:
# Warn that a deprecated config option is in use
logger.warning(
'The config option "trust_identity_server_for_password_resets" '
@@ -572,12 +572,12 @@ class IdentityHandler(BaseHandler):
validation_session = None
# Try to validate as email
- if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
# Ask our delegated email identity server
validation_session = await self.threepid_from_creds(
self.hs.config.account_threepid_delegate_email, threepid_creds
)
- elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
# Get a validated session matching these details
validation_session = await self.store.get_threepid_validation_session(
"email", client_secret, sid=sid, validated=True
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 7a5d8e6f4e..ad4e4a3d6f 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -443,7 +443,7 @@ class EventCreationHandler:
)
self._block_events_without_consent_error = (
- self.config.block_events_without_consent_error
+ self.config.consent.block_events_without_consent_error
)
# we need to construct a ConsentURIBuilder here, as it checks that the necessary
@@ -744,7 +744,7 @@ class EventCreationHandler:
if u["appservice_id"] is not None:
# users registered by an appservice are exempt
return
- if u["consent_version"] == self.config.user_consent_version:
+ if u["consent_version"] == self.config.consent.user_consent_version:
return
consent_uri = self._consent_uri_builder.build_user_consent_uri(user.localpart)
diff --git a/synapse/handlers/password_policy.py b/synapse/handlers/password_policy.py
index cd21efdcc6..eadd7ced09 100644
--- a/synapse/handlers/password_policy.py
+++ b/synapse/handlers/password_policy.py
@@ -27,8 +27,8 @@ logger = logging.getLogger(__name__)
class PasswordPolicyHandler:
def __init__(self, hs: "HomeServer"):
- self.policy = hs.config.password_policy
- self.enabled = hs.config.password_policy_enabled
+ self.policy = hs.config.auth.password_policy
+ self.enabled = hs.config.auth.password_policy_enabled
# Regexps for the spec'd policy parameters.
self.regexp_digit = re.compile("[0-9]")
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 1c195c65db..01c5e1385d 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -97,6 +97,7 @@ class RegistrationHandler(BaseHandler):
self.ratelimiter = hs.get_registration_ratelimiter()
self.macaroon_gen = hs.get_macaroon_generator()
self._account_validity_handler = hs.get_account_validity_handler()
+ self._user_consent_version = self.hs.config.consent.user_consent_version
self._server_notices_mxid = hs.config.server_notices_mxid
self._server_name = hs.hostname
@@ -339,7 +340,7 @@ class RegistrationHandler(BaseHandler):
auth_provider=(auth_provider_id or ""),
).inc()
- if not self.hs.config.user_consent_at_registration:
+ if not self.hs.config.consent.user_consent_at_registration:
if not self.hs.config.auto_join_rooms_for_guests and make_guest:
logger.info(
"Skipping auto-join for %s because auto-join for guests is disabled",
@@ -864,7 +865,9 @@ class RegistrationHandler(BaseHandler):
await self._register_msisdn_threepid(user_id, threepid)
if auth_result and LoginType.TERMS in auth_result:
- await self._on_user_consented(user_id, self.hs.config.user_consent_version)
+ # The terms type should only exist if consent is enabled.
+ assert self._user_consent_version is not None
+ await self._on_user_consented(user_id, self._user_consent_version)
async def _on_user_consented(self, user_id: str, consent_version: str) -> None:
"""A user consented to the terms on registration
@@ -910,8 +913,8 @@ class RegistrationHandler(BaseHandler):
# getting mail spam where they weren't before if email
# notifs are set up on a homeserver)
if (
- self.hs.config.email_enable_notifs
- and self.hs.config.email_notif_for_new_users
+ self.hs.config.email.email_enable_notifs
+ and self.hs.config.email.email_notif_for_new_users
and token
):
# Pull the ID of the access token back out of the db
diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py
index ea9325e96a..8f5d465fa1 100644
--- a/synapse/handlers/ui_auth/checkers.py
+++ b/synapse/handlers/ui_auth/checkers.py
@@ -82,10 +82,10 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
- self._enabled = bool(hs.config.recaptcha_private_key)
+ self._enabled = bool(hs.config.captcha.recaptcha_private_key)
self._http_client = hs.get_proxied_http_client()
- self._url = hs.config.recaptcha_siteverify_api
- self._secret = hs.config.recaptcha_private_key
+ self._url = hs.config.captcha.recaptcha_siteverify_api
+ self._secret = hs.config.captcha.recaptcha_private_key
def is_enabled(self) -> bool:
return self._enabled
@@ -161,12 +161,17 @@ class _BaseThreepidAuthChecker:
self.hs.config.account_threepid_delegate_msisdn, threepid_creds
)
elif medium == "email":
- if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ if (
+ self.hs.config.email.threepid_behaviour_email
+ == ThreepidBehaviour.REMOTE
+ ):
assert self.hs.config.account_threepid_delegate_email
threepid = await identity_handler.threepid_from_creds(
self.hs.config.account_threepid_delegate_email, threepid_creds
)
- elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ elif (
+ self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL
+ ):
threepid = None
row = await self.store.get_threepid_validation_session(
medium,
@@ -218,7 +223,7 @@ class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChec
_BaseThreepidAuthChecker.__init__(self, hs)
def is_enabled(self) -> bool:
- return self.hs.config.threepid_behaviour_email in (
+ return self.hs.config.email.threepid_behaviour_email in (
ThreepidBehaviour.REMOTE,
ThreepidBehaviour.LOCAL,
)
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 174e6934a8..8ae21bc43c 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -119,14 +119,16 @@ class ModuleApi:
self.custom_template_dir = hs.config.server.custom_template_directory
try:
- app_name = self._hs.config.email_app_name
+ app_name = self._hs.config.email.email_app_name
- self._from_string = self._hs.config.email_notif_from % {"app": app_name}
+ self._from_string = self._hs.config.email.email_notif_from % {
+ "app": app_name
+ }
except (KeyError, TypeError):
# If substitution failed (which can happen if the string contains
# placeholders other than just "app", or if the type of the placeholder is
# not a string), fall back to the bare strings.
- self._from_string = self._hs.config.email_notif_from
+ self._from_string = self._hs.config.email.email_notif_from
self._raw_from = email.utils.parseaddr(self._from_string)[1]
diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py
index 29ed346d37..b57e094091 100644
--- a/synapse/push/pusher.py
+++ b/synapse/push/pusher.py
@@ -77,4 +77,4 @@ class PusherFactory:
if isinstance(brand, str):
return brand
- return self.config.email_app_name
+ return self.config.email.email_app_name
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 681e491826..46bfec4623 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -368,8 +368,8 @@ class UserRestServletV2(RestServlet):
user_id, medium, address, current_time
)
if (
- self.hs.config.email_enable_notifs
- and self.hs.config.email_notif_for_new_users
+ self.hs.config.email.email_enable_notifs
+ and self.hs.config.email.email_notif_for_new_users
):
await self.pusher_pool.add_pusher(
user_id=user_id,
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index aefaaa8ae8..6a7608d60b 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -64,17 +64,17 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
self.config = hs.config
self.identity_handler = hs.get_identity_handler()
- if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
self.mailer = Mailer(
hs=self.hs,
- app_name=self.config.email_app_name,
- template_html=self.config.email_password_reset_template_html,
- template_text=self.config.email_password_reset_template_text,
+ app_name=self.config.email.email_app_name,
+ template_html=self.config.email.email_password_reset_template_html,
+ template_text=self.config.email.email_password_reset_template_text,
)
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
- if self.config.local_threepid_handling_disabled_due_to_email_config:
+ if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.config.email.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
"User password resets have been disabled due to lack of email config"
)
@@ -129,7 +129,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
- if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
assert self.hs.config.account_threepid_delegate_email
# Have the configured identity server handle the request
@@ -349,17 +349,17 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
self.identity_handler = hs.get_identity_handler()
self.store = self.hs.get_datastore()
- if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
self.mailer = Mailer(
hs=self.hs,
- app_name=self.config.email_app_name,
- template_html=self.config.email_add_threepid_template_html,
- template_text=self.config.email_add_threepid_template_text,
+ app_name=self.config.email.email_app_name,
+ template_html=self.config.email.email_add_threepid_template_html,
+ template_text=self.config.email.email_add_threepid_template_text,
)
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
- if self.config.local_threepid_handling_disabled_due_to_email_config:
+ if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.config.email.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
"Adding emails have been disabled due to lack of an email config"
)
@@ -413,7 +413,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
- if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
assert self.hs.config.account_threepid_delegate_email
# Have the configured identity server handle the request
@@ -534,21 +534,21 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
self.config = hs.config
self.clock = hs.get_clock()
self.store = hs.get_datastore()
- if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
self._failure_email_template = (
- self.config.email_add_threepid_template_failure_html
+ self.config.email.email_add_threepid_template_failure_html
)
async def on_GET(self, request: Request) -> None:
- if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
- if self.config.local_threepid_handling_disabled_due_to_email_config:
+ if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.config.email.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
"Adding emails have been disabled due to lack of an email config"
)
raise SynapseError(
400, "Adding an email to your account is disabled on this server"
)
- elif self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ elif self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
raise SynapseError(
400,
"This homeserver is not validating threepids. Use an identity server "
@@ -575,7 +575,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
return None
# Otherwise show the success template
- html = self.config.email_add_threepid_template_success_html_content
+ html = self.config.email.email_add_threepid_template_success_html_content
status_code = 200
except ThreepidValidationError as e:
status_code = e.code
diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py
index 7bb7801472..282861fae2 100644
--- a/synapse/rest/client/auth.py
+++ b/synapse/rest/client/auth.py
@@ -47,7 +47,7 @@ class AuthRestServlet(RestServlet):
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
self.registration_handler = hs.get_registration_handler()
- self.recaptcha_template = hs.config.recaptcha_template
+ self.recaptcha_template = hs.config.captcha.recaptcha_template
self.terms_template = hs.config.terms_template
self.registration_token_template = hs.config.registration_token_template
self.success_template = hs.config.fallback_success_template
@@ -62,7 +62,7 @@ class AuthRestServlet(RestServlet):
session=session,
myurl="%s/r0/auth/%s/fallback/web"
% (CLIENT_API_PREFIX, LoginType.RECAPTCHA),
- sitekey=self.hs.config.recaptcha_public_key,
+ sitekey=self.hs.config.captcha.recaptcha_public_key,
)
elif stagetype == LoginType.TERMS:
html = self.terms_template.render(
@@ -70,7 +70,7 @@ class AuthRestServlet(RestServlet):
terms_url="%s_matrix/consent?v=%s"
% (
self.hs.config.server.public_baseurl,
- self.hs.config.user_consent_version,
+ self.hs.config.consent.user_consent_version,
),
myurl="%s/r0/auth/%s/fallback/web"
% (CLIENT_API_PREFIX, LoginType.TERMS),
@@ -118,7 +118,7 @@ class AuthRestServlet(RestServlet):
session=session,
myurl="%s/r0/auth/%s/fallback/web"
% (CLIENT_API_PREFIX, LoginType.RECAPTCHA),
- sitekey=self.hs.config.recaptcha_public_key,
+ sitekey=self.hs.config.captcha.recaptcha_public_key,
error=e.msg,
)
else:
@@ -139,7 +139,7 @@ class AuthRestServlet(RestServlet):
terms_url="%s_matrix/consent?v=%s"
% (
self.hs.config.server.public_baseurl,
- self.hs.config.user_consent_version,
+ self.hs.config.consent.user_consent_version,
),
myurl="%s/r0/auth/%s/fallback/web"
% (CLIENT_API_PREFIX, LoginType.TERMS),
diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py
index a6ede7e2f3..d766e98dce 100644
--- a/synapse/rest/client/login.py
+++ b/synapse/rest/client/login.py
@@ -77,7 +77,7 @@ class LoginRestServlet(RestServlet):
# SSO configuration.
self.saml2_enabled = hs.config.saml2_enabled
- self.cas_enabled = hs.config.cas_enabled
+ self.cas_enabled = hs.config.cas.cas_enabled
self.oidc_enabled = hs.config.oidc_enabled
self._msc2918_enabled = hs.config.access_token_lifetime is not None
@@ -559,7 +559,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
if hs.config.access_token_lifetime is not None:
RefreshTokenServlet(hs).register(http_server)
SsoRedirectServlet(hs).register(http_server)
- if hs.config.cas_enabled:
+ if hs.config.cas.cas_enabled:
CasTicketServlet(hs).register(http_server)
diff --git a/synapse/rest/client/password_policy.py b/synapse/rest/client/password_policy.py
index 0465fd2292..9f1908004b 100644
--- a/synapse/rest/client/password_policy.py
+++ b/synapse/rest/client/password_policy.py
@@ -35,8 +35,8 @@ class PasswordPolicyServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
- self.policy = hs.config.password_policy
- self.enabled = hs.config.password_policy_enabled
+ self.policy = hs.config.auth.password_policy
+ self.enabled = hs.config.auth.password_policy_enabled
def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
if not self.enabled or not self.policy:
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index abe4d7e205..48b0062cf4 100644
--- a/synapse/rest/client/register.py
+++ b/synapse/rest/client/register.py
@@ -75,17 +75,19 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
self.identity_handler = hs.get_identity_handler()
self.config = hs.config
- if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
self.mailer = Mailer(
hs=self.hs,
- app_name=self.config.email_app_name,
- template_html=self.config.email_registration_template_html,
- template_text=self.config.email_registration_template_text,
+ app_name=self.config.email.email_app_name,
+ template_html=self.config.email.email_registration_template_html,
+ template_text=self.config.email.email_registration_template_text,
)
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
- if self.hs.config.local_threepid_handling_disabled_due_to_email_config:
+ if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if (
+ self.hs.config.email.local_threepid_handling_disabled_due_to_email_config
+ ):
logger.warning(
"Email registration has been disabled due to lack of email config"
)
@@ -137,7 +139,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
- if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
assert self.hs.config.account_threepid_delegate_email
# Have the configured identity server handle the request
@@ -259,9 +261,9 @@ class RegistrationSubmitTokenServlet(RestServlet):
self.clock = hs.get_clock()
self.store = hs.get_datastore()
- if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
self._failure_email_template = (
- self.config.email_registration_template_failure_html
+ self.config.email.email_registration_template_failure_html
)
async def on_GET(self, request: Request, medium: str) -> None:
@@ -269,8 +271,8 @@ class RegistrationSubmitTokenServlet(RestServlet):
raise SynapseError(
400, "This medium is currently not supported for registration"
)
- if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
- if self.config.local_threepid_handling_disabled_due_to_email_config:
+ if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.config.email.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
"User registration via email has been disabled due to lack of email config"
)
@@ -303,7 +305,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
return None
# Otherwise show the success template
- html = self.config.email_registration_template_success_html_content
+ html = self.config.email.email_registration_template_success_html_content
status_code = 200
except ThreepidValidationError as e:
status_code = e.code
@@ -897,12 +899,12 @@ def _calculate_registration_flows(
flows.append([LoginType.MSISDN, LoginType.EMAIL_IDENTITY])
# Prepend m.login.terms to all flows if we're requiring consent
- if config.user_consent_at_registration:
+ if config.consent.user_consent_at_registration:
for flow in flows:
flow.insert(0, LoginType.TERMS)
# Prepend recaptcha to all flows if we're requiring captcha
- if config.enable_registration_captcha:
+ if config.captcha.enable_registration_captcha:
for flow in flows:
flow.insert(0, LoginType.RECAPTCHA)
diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py
index 06e0fbde22..fc634a492d 100644
--- a/synapse/rest/consent/consent_resource.py
+++ b/synapse/rest/consent/consent_resource.py
@@ -84,14 +84,15 @@ class ConsentResource(DirectServeHtmlResource):
# this is required by the request_handler wrapper
self.clock = hs.get_clock()
- self._default_consent_version = hs.config.user_consent_version
- if self._default_consent_version is None:
+ # Consent must be configured to create this resource.
+ default_consent_version = hs.config.consent.user_consent_version
+ consent_template_directory = hs.config.consent.user_consent_template_dir
+ if default_consent_version is None or consent_template_directory is None:
raise ConfigError(
"Consent resource is enabled but user_consent section is "
"missing in config file."
)
-
- consent_template_directory = hs.config.user_consent_template_dir
+ self._default_consent_version = default_consent_version
# TODO: switch to synapse.util.templates.build_jinja_env
loader = jinja2.FileSystemLoader(consent_template_directory)
diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py
index f2800bf2db..28a67f04e3 100644
--- a/synapse/rest/synapse/client/password_reset.py
+++ b/synapse/rest/synapse/client/password_reset.py
@@ -47,20 +47,20 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource):
self.store = hs.get_datastore()
self._local_threepid_handling_disabled_due_to_email_config = (
- hs.config.local_threepid_handling_disabled_due_to_email_config
+ hs.config.email.local_threepid_handling_disabled_due_to_email_config
)
self._confirmation_email_template = (
- hs.config.email_password_reset_template_confirmation_html
+ hs.config.email.email_password_reset_template_confirmation_html
)
self._email_password_reset_template_success_html = (
- hs.config.email_password_reset_template_success_html_content
+ hs.config.email.email_password_reset_template_success_html_content
)
self._failure_email_template = (
- hs.config.email_password_reset_template_failure_html
+ hs.config.email.email_password_reset_template_failure_html
)
# This resource should not be mounted if threepid behaviour is not LOCAL
- assert hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL
+ assert hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL
async def _async_render_GET(self, request: Request) -> Tuple[int, bytes]:
sid = parse_string(request, "sid", required=True)
diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py
index 4e0f814035..e09a25591f 100644
--- a/synapse/server_notices/consent_server_notices.py
+++ b/synapse/server_notices/consent_server_notices.py
@@ -36,9 +36,11 @@ class ConsentServerNotices:
self._users_in_progress: Set[str] = set()
- self._current_consent_version = hs.config.user_consent_version
- self._server_notice_content = hs.config.user_consent_server_notice_content
- self._send_to_guests = hs.config.user_consent_server_notice_to_guests
+ self._current_consent_version = hs.config.consent.user_consent_version
+ self._server_notice_content = (
+ hs.config.consent.user_consent_server_notice_content
+ )
+ self._send_to_guests = hs.config.consent.user_consent_server_notice_to_guests
if self._server_notice_content is not None:
if not self._server_notices_manager.is_enabled():
@@ -63,6 +65,9 @@ class ConsentServerNotices:
# not enabled
return
+ # A consent version must be given.
+ assert self._current_consent_version is not None
+
# make sure we don't send two messages to the same user at once
if user_id in self._users_in_progress:
return
diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py
index e2d1b758bd..2da2659f41 100644
--- a/synapse/storage/databases/main/appservice.py
+++ b/synapse/storage/databases/main/appservice.py
@@ -60,7 +60,7 @@ def _make_exclusive_regex(
class ApplicationServiceWorkerStore(SQLBaseStore):
def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"):
self.services_cache = load_appservices(
- hs.hostname, hs.config.app_service_config_files
+ hs.hostname, hs.config.appservice.app_service_config_files
)
self.exclusive_user_regex = _make_exclusive_regex(self.services_cache)
diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py
index d213b26703..b76ee51a9b 100644
--- a/synapse/storage/databases/main/monthly_active_users.py
+++ b/synapse/storage/databases/main/monthly_active_users.py
@@ -63,7 +63,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
"""Generates current count of monthly active users broken down by service.
A service is typically an appservice but also includes native matrix users.
Since the `monthly_active_users` table is populated from the `user_ips` table
- `config.track_appservice_user_ips` must be set to `true` for this
+ `config.appservice.track_appservice_user_ips` must be set to `true` for this
method to return anything other than native matrix users.
Returns:
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index fafadb88fc..52ef9deede 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -388,7 +388,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
"get_users_expiring_soon",
select_users_txn,
self._clock.time_msec(),
- self.config.account_validity_renew_at,
+ self.config.account_validity.account_validity_renew_at,
)
async def set_renewal_mail_status(self, user_id: str, email_sent: bool) -> None:
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index d4754c904c..f31880b8ec 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -545,7 +545,7 @@ def _apply_module_schemas(
database_engine:
config: application config
"""
- for (mod, _config) in config.password_providers:
+ for (mod, _config) in config.authproviders.password_providers:
if not hasattr(mod, "get_db_schema_files"):
continue
modname = ".".join((mod.__module__, mod.__name__))
diff --git a/synapse/storage/schema/main/delta/30/as_users.py b/synapse/storage/schema/main/delta/30/as_users.py
index 8a1f340083..22a7901e15 100644
--- a/synapse/storage/schema/main/delta/30/as_users.py
+++ b/synapse/storage/schema/main/delta/30/as_users.py
@@ -33,7 +33,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs):
config_files = []
try:
- config_files = config.app_service_config_files
+ config_files = config.appservice.app_service_config_files
except AttributeError:
logger.warning("Could not get app_service_config_files from config")
pass
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index e798513ac1..0fa55e03b4 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -47,7 +47,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
def prepare(self, reactor, clock, hs):
self.event_creation_handler = hs.get_event_creation_handler()
- hs.config.user_consent_version = "1"
+ hs.config.consent.user_consent_version = "1"
consent_uri_builder = Mock()
consent_uri_builder.build_user_consent_uri.return_value = "http://example.com"
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index f5c195a075..414c8781a9 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -97,7 +97,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
self.hs.config.enable_registration = True
self.hs.config.registrations_require_3pid = []
self.hs.config.auto_join_rooms = []
- self.hs.config.enable_registration_captcha = False
+ self.hs.config.captcha.enable_registration_captcha = False
return self.hs
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index 666bffe257..ebadf47948 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -41,9 +41,8 @@ class ApplicationServiceStoreTestCase(unittest.TestCase):
self.addCleanup, federation_sender=Mock(), federation_client=Mock()
)
- hs.config.app_service_config_files = self.as_yaml_files
+ hs.config.appservice.app_service_config_files = self.as_yaml_files
hs.config.caches.event_cache_size = 1
- hs.config.password_providers = []
self.as_token = "token1"
self.as_url = "some_url"
@@ -108,9 +107,8 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
self.addCleanup, federation_sender=Mock(), federation_client=Mock()
)
- hs.config.app_service_config_files = self.as_yaml_files
+ hs.config.appservice.app_service_config_files = self.as_yaml_files
hs.config.caches.event_cache_size = 1
- hs.config.password_providers = []
self.as_list = [
{"token": "token1", "url": "https://matrix-as.org", "id": "id_1"},
@@ -496,9 +494,8 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase):
self.addCleanup, federation_sender=Mock(), federation_client=Mock()
)
- hs.config.app_service_config_files = [f1, f2]
+ hs.config.appservice.app_service_config_files = [f1, f2]
hs.config.caches.event_cache_size = 1
- hs.config.password_providers = []
database = hs.get_datastores().databases[0]
ApplicationServiceStore(
@@ -514,7 +511,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase):
self.addCleanup, federation_sender=Mock(), federation_client=Mock()
)
- hs.config.app_service_config_files = [f1, f2]
+ hs.config.appservice.app_service_config_files = [f1, f2]
hs.config.caches.event_cache_size = 1
hs.config.password_providers = []
@@ -540,9 +537,8 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase):
self.addCleanup, federation_sender=Mock(), federation_client=Mock()
)
- hs.config.app_service_config_files = [f1, f2]
+ hs.config.appservice.app_service_config_files = [f1, f2]
hs.config.caches.event_cache_size = 1
- hs.config.password_providers = []
with self.assertRaises(ConfigError) as cm:
database = hs.get_datastores().databases[0]
diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py
index da98733ce8..7cc5e621ba 100644
--- a/tests/storage/test_cleanup_extrems.py
+++ b/tests/storage/test_cleanup_extrems.py
@@ -258,7 +258,7 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
info, _ = self.get_success(self.room_creator.create_room(self.requester, {}))
self.room_id = info["room_id"]
self.event_creator = homeserver.get_event_creation_handler()
- homeserver.config.user_consent_version = self.CONSENT_VERSION
+ homeserver.config.consent.user_consent_version = self.CONSENT_VERSION
def test_send_dummy_event(self):
self._create_extremity_rich_graph()
--
cgit 1.5.1
From dcfd8649704bd0a05bfbffdd96d60fc2b1913a2f Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 23 Sep 2021 13:02:13 +0100
Subject: Fix reactivated users not being added to the user directory (#10782)
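The fix reorders re-activation ahead of the user-directory update. A minimal
sketch of the corrected ordering (the store and handler methods mirror the
diff below; the free-standing function is illustrative, not the real handler):

    from synapse.types import UserID

    async def activate_account(store, user_directory_handler, user_id: str) -> None:
        # Ensure the user is not marked as erased, then mark them active.
        await store.mark_user_not_erased(user_id)
        await store.set_user_deactivated_status(user_id, False)

        # Only now re-add them to the user directory: deactivated users are
        # excluded from it, so updating the directory first (as the old code
        # did) had no effect.
        user = UserID.from_string(user_id)
        profile = await store.get_profileinfo(user.localpart)
        await user_directory_handler.handle_local_profile_change(user_id, profile)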
Co-authored-by: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Co-authored-by: reivilibre
Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
changelog.d/10782.bugfix | 1 +
synapse/handlers/deactivate_account.py | 9 +++++---
tests/handlers/test_user_directory.py | 42 +++++++++++++++++++++++++++++++++-
3 files changed, 48 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/10782.bugfix
diff --git a/changelog.d/10782.bugfix b/changelog.d/10782.bugfix
new file mode 100644
index 0000000000..3e410447cc
--- /dev/null
+++ b/changelog.d/10782.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug which caused deactivated users that were later reactivated to be missing from the user directory.
\ No newline at end of file
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index a03ff9842b..9ae5b7750e 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -255,13 +255,16 @@ class DeactivateAccountHandler(BaseHandler):
Args:
user_id: ID of user to be re-activated
"""
- # Add the user to the directory, if necessary.
user = UserID.from_string(user_id)
- profile = await self.store.get_profileinfo(user.localpart)
- await self.user_directory_handler.handle_local_profile_change(user_id, profile)
# Ensure the user is not marked as erased.
await self.store.mark_user_not_erased(user_id)
# Mark the user as active.
await self.store.set_user_deactivated_status(user_id, False)
+
+ # Add the user to the directory, if necessary. Note that
+ # this must be done after the user is re-activated, because
+ # deactivated users are excluded from the user directory.
+ profile = await self.store.get_profileinfo(user.localpart)
+ await self.user_directory_handler.handle_local_profile_change(user_id, profile)
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index ae88ed89aa..f3684c34a2 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock
+from urllib.parse import quote
from twisted.internet import defer
@@ -20,6 +21,7 @@ from synapse.api.constants import UserTypes
from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.rest.client import login, room, user_directory
from synapse.storage.roommember import ProfileInfo
+from synapse.types import create_requester
from tests import unittest
from tests.unittest import override_config
@@ -32,7 +34,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
- synapse.rest.admin.register_servlets_for_client_rest_resource,
+ synapse.rest.admin.register_servlets,
room.register_servlets,
]
@@ -130,6 +132,44 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.get_success(self.handler.handle_local_user_deactivated(r_user_id))
self.store.remove_from_user_dir.called_once_with(r_user_id)
+ def test_reactivation_makes_regular_user_searchable(self):
+ user = self.register_user("regular", "pass")
+ user_token = self.login(user, "pass")
+ admin_user = self.register_user("admin", "pass", admin=True)
+ admin_token = self.login(admin_user, "pass")
+
+ # Ensure the regular user is publicly visible and searchable.
+ self.helper.create_room_as(user, is_public=True, tok=user_token)
+ s = self.get_success(self.handler.search_users(admin_user, user, 10))
+ self.assertEqual(len(s["results"]), 1)
+ self.assertEqual(s["results"][0]["user_id"], user)
+
+ # Deactivate the user and check they're not searchable.
+ deactivate_handler = self.hs.get_deactivate_account_handler()
+ self.get_success(
+ deactivate_handler.deactivate_account(
+ user, erase_data=False, requester=create_requester(admin_user)
+ )
+ )
+ s = self.get_success(self.handler.search_users(admin_user, user, 10))
+ self.assertEqual(s["results"], [])
+
+ # Reactivate the user
+ channel = self.make_request(
+ "PUT",
+ f"/_synapse/admin/v2/users/{quote(user)}",
+ access_token=admin_token,
+ content={"deactivated": False, "password": "pass"},
+ )
+ self.assertEqual(channel.code, 200)
+ user_token = self.login(user, "pass")
+ self.helper.create_room_as(user, is_public=True, tok=user_token)
+
+ # Check they're searchable.
+ s = self.get_success(self.handler.search_users(admin_user, user, 10))
+ self.assertEqual(len(s["results"]), 1)
+ self.assertEqual(s["results"][0]["user_id"], user)
+
def test_private_room(self):
"""
A user can be searched for only by people that are either in a public
--
cgit 1.5.1
From a10988983a1cd145fc5ae57c9a00ea95fbaece61 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 23 Sep 2021 14:45:32 +0100
Subject: Break down cache expiry reasons in Grafana (#10880)
A follow-up to #10829
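For context, #10829 split the eviction metric by a `reason` label; this change
surfaces that label in the panel legend. A rough illustration of the rendered
legend lines under the new `{{name}} ({{reason}}) {{job}}-{{index}}` format
(the values shown are made up):

    getEvent (size) synapse-0
    getEvent (time) synapse-0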
---
changelog.d/10880.misc | 1 +
contrib/grafana/synapse.json | 4 ++--
2 files changed, 3 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/10880.misc
diff --git a/changelog.d/10880.misc b/changelog.d/10880.misc
new file mode 100644
index 0000000000..5f58d6198c
--- /dev/null
+++ b/changelog.d/10880.misc
@@ -0,0 +1 @@
+Break down Grafana's cache expiry time series based on reason for eviction---see #10829.
\ No newline at end of file
diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json
index ed1e8ba7f8..2c839c30d0 100644
--- a/contrib/grafana/synapse.json
+++ b/contrib/grafana/synapse.json
@@ -6785,7 +6785,7 @@
"expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 1,
- "legendFormat": "{{name}} {{job}}-{{index}}",
+ "legendFormat": "{{name}} ({{reason}}) {{job}}-{{index}}",
"refId": "A"
}
],
@@ -10888,5 +10888,5 @@
"timezone": "",
"title": "Synapse",
"uid": "000000012",
- "version": 99
+ "version": 100
}
\ No newline at end of file
--
cgit 1.5.1
From 47854c71e9bded2c446a251f3ef16f4d5da96ebe Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 23 Sep 2021 12:03:01 -0400
Subject: Use direct references for configuration variables (part 4). (#10893)
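The pattern throughout: read config attributes through their owning section
object rather than off the root config, e.g. `hs.config.macaroon_secret_key`
becomes `hs.config.key.macaroon_secret_key`. A minimal sketch of the shape
(class and attribute names here are simplified stand-ins, not the real
Synapse config classes):

    # Each config section owns its attributes; the root exposes the sections.
    class KeyConfig:
        def __init__(self, macaroon_secret_key: str) -> None:
            self.macaroon_secret_key = macaroon_secret_key

    class HomeServerConfig:
        def __init__(self) -> None:
            self.key = KeyConfig(macaroon_secret_key="a-secret")

    config = HomeServerConfig()
    # Direct, namespaced reference, as used throughout this patch:
    secret = config.key.macaroon_secret_key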
---
changelog.d/10893.misc | 1 +
synapse/api/urls.py | 4 ++--
synapse/app/_base.py | 6 ++++--
synapse/app/admin_cmd.py | 2 +-
synapse/app/generic_worker.py | 4 ++--
synapse/app/homeserver.py | 10 +++++-----
synapse/app/phone_stats_home.py | 8 +++++---
synapse/config/logger.py | 2 +-
synapse/federation/transport/server/_base.py | 4 +++-
synapse/groups/groups_server.py | 6 +++---
synapse/handlers/auth.py | 2 +-
synapse/handlers/oidc.py | 2 +-
synapse/handlers/profile.py | 2 +-
synapse/http/matrixfederationclient.py | 5 +++--
synapse/push/httppusher.py | 4 +++-
synapse/rest/client/login.py | 12 ++++++------
synapse/rest/consent/consent_resource.py | 4 ++--
synapse/rest/key/v2/local_key_resource.py | 10 +++++-----
synapse/rest/key/v2/remote_key_resource.py | 6 ++++--
synapse/rest/media/v1/media_repository.py | 4 +++-
synapse/rest/synapse/client/__init__.py | 2 +-
synapse/storage/databases/main/roommember.py | 2 +-
tests/api/test_auth.py | 4 ++--
tests/app/test_phone_stats_home.py | 2 +-
tests/config/test_load.py | 10 +++++-----
tests/config/test_ratelimiting.py | 2 +-
tests/handlers/test_auth.py | 2 +-
tests/replication/_base.py | 2 +-
tests/rest/client/test_login.py | 12 ++++++------
tests/rest/client/test_register.py | 2 +-
tests/storage/test_appservice.py | 1 -
tests/util/test_ratelimitutils.py | 2 +-
32 files changed, 77 insertions(+), 64 deletions(-)
create mode 100644 changelog.d/10893.misc
diff --git a/changelog.d/10893.misc b/changelog.d/10893.misc
new file mode 100644
index 0000000000..586a0b3a96
--- /dev/null
+++ b/changelog.d/10893.misc
@@ -0,0 +1 @@
+Use direct references to config flags.
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index d3270cd6d2..032c69b210 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -39,12 +39,12 @@ class ConsentURIBuilder:
Args:
hs_config (synapse.config.homeserver.HomeServerConfig):
"""
- if hs_config.form_secret is None:
+ if hs_config.key.form_secret is None:
raise ConfigError("form_secret not set in config")
if hs_config.server.public_baseurl is None:
raise ConfigError("public_baseurl not set in config")
- self._hmac_secret = hs_config.form_secret.encode("utf-8")
+ self._hmac_secret = hs_config.key.form_secret.encode("utf-8")
self._public_baseurl = hs_config.server.public_baseurl
def build_user_consent_uri(self, user_id):
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index d1aa2e7fb5..f657f11f76 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -424,12 +424,14 @@ def setup_sentry(hs):
hs (synapse.server.HomeServer)
"""
- if not hs.config.sentry_enabled:
+ if not hs.config.metrics.sentry_enabled:
return
import sentry_sdk
- sentry_sdk.init(dsn=hs.config.sentry_dsn, release=get_version_string(synapse))
+ sentry_sdk.init(
+ dsn=hs.config.metrics.sentry_dsn, release=get_version_string(synapse)
+ )
# We set some default tags that give some context to this instance
with sentry_sdk.configure_scope() as scope:
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 5e956b1e27..259d5ec7cc 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -192,7 +192,7 @@ def start(config_options):
):
# Since we're meant to be run as a "command" let's not redirect stdio
# unless we've actually set log config.
- config.no_redirect_stdio = True
+ config.logging.no_redirect_stdio = True
# Explicitly disable background processes
config.update_user_directory = False
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 33afd59c72..e0776689ce 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -395,7 +395,7 @@ class GenericWorkerServer(HomeServer):
manhole_globals={"hs": self},
)
elif listener.type == "metrics":
- if not self.config.enable_metrics:
+ if not self.config.metrics.enable_metrics:
logger.warning(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -488,7 +488,7 @@ def start(config_options):
register_start(_base.start, hs)
# redirect stdio to the logs, if configured.
- if not hs.config.no_redirect_stdio:
+ if not hs.config.logging.no_redirect_stdio:
redirect_stdio_to_logs()
_base.start_worker_reactor("synapse-generic-worker", config)
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 886e291e4c..f1769f146b 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -269,7 +269,7 @@ class SynapseHomeServer(HomeServer):
# https://twistedmatrix.com/trac/ticket/7678
resources[WEB_CLIENT_PREFIX] = File(webclient_loc)
- if name == "metrics" and self.config.enable_metrics:
+ if name == "metrics" and self.config.metrics.enable_metrics:
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
if name == "replication":
@@ -278,7 +278,7 @@ class SynapseHomeServer(HomeServer):
return resources
def start_listening(self):
- if self.config.redis_enabled:
+ if self.config.redis.redis_enabled:
# If redis is enabled we connect via the replication command handler
# in the same way as the workers (since we're effectively a client
# rather than a server).
@@ -305,7 +305,7 @@ class SynapseHomeServer(HomeServer):
for s in services:
reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
elif listener.type == "metrics":
- if not self.config.enable_metrics:
+ if not self.config.metrics.enable_metrics:
logger.warning(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -366,7 +366,7 @@ def setup(config_options):
async def start():
# Load the OIDC provider metadatas, if OIDC is enabled.
- if hs.config.oidc_enabled:
+ if hs.config.oidc.oidc_enabled:
oidc = hs.get_oidc_handler()
# Loading the provider metadata also ensures the provider config is valid.
await oidc.load_metadata()
@@ -455,7 +455,7 @@ def main():
hs = setup(sys.argv[1:])
# redirect stdio to the logs, if configured.
- if not hs.config.no_redirect_stdio:
+ if not hs.config.logging.no_redirect_stdio:
redirect_stdio_to_logs()
run(hs)
diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py
index 4a95da90f9..49e7a45e5c 100644
--- a/synapse/app/phone_stats_home.py
+++ b/synapse/app/phone_stats_home.py
@@ -131,10 +131,12 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
log_level = synapse_logger.getEffectiveLevel()
stats["log_level"] = logging.getLevelName(log_level)
- logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats))
+ logger.info(
+ "Reporting stats to %s: %s" % (hs.config.metrics.report_stats_endpoint, stats)
+ )
try:
await hs.get_proxied_http_client().put_json(
- hs.config.report_stats_endpoint, stats
+ hs.config.metrics.report_stats_endpoint, stats
)
except Exception as e:
logger.warning("Error reporting stats: %s", e)
@@ -188,7 +190,7 @@ def start_phone_stats_home(hs):
clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
# End of monthly active user settings
- if hs.config.report_stats:
+ if hs.config.metrics.report_stats:
logger.info("Scheduling stats reporting for 3 hour intervals")
clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000, hs, stats)
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index aca9d467e6..bf8ca7d5fe 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -322,7 +322,7 @@ def setup_logging(
"""
log_config_path = (
- config.worker_log_config if use_worker_options else config.log_config
+ config.worker_log_config if use_worker_options else config.logging.log_config
)
# Perform one-time logging configuration.
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index 624c859f1e..cef65929c5 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -49,7 +49,9 @@ class Authenticator:
self.keyring = hs.get_keyring()
self.server_name = hs.hostname
self.store = hs.get_datastore()
- self.federation_domain_whitelist = hs.config.federation_domain_whitelist
+ self.federation_domain_whitelist = (
+ hs.config.federation.federation_domain_whitelist
+ )
self.notifier = hs.get_notifier()
self.replication_client = None
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index d6b75ac27f..449bbc7004 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -847,16 +847,16 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
UserID.from_string(requester_user_id)
)
if not is_admin:
- if not self.hs.config.enable_group_creation:
+ if not self.hs.config.groups.enable_group_creation:
raise SynapseError(
403, "Only a server admin can create groups on this server"
)
localpart = group_id_obj.localpart
- if not localpart.startswith(self.hs.config.group_creation_prefix):
+ if not localpart.startswith(self.hs.config.groups.group_creation_prefix):
raise SynapseError(
400,
"Can only create groups with prefix %r on this server"
- % (self.hs.config.group_creation_prefix,),
+ % (self.hs.config.groups.group_creation_prefix,),
)
profile = content.get("profile", {})
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index b747f80bc1..0f80dfdc43 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -1802,7 +1802,7 @@ class MacaroonGenerator:
macaroon = pymacaroons.Macaroon(
location=self.hs.config.server.server_name,
identifier="key",
- key=self.hs.config.macaroon_secret_key,
+ key=self.hs.config.key.macaroon_secret_key,
)
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
index aed5a40a78..3665d91513 100644
--- a/synapse/handlers/oidc.py
+++ b/synapse/handlers/oidc.py
@@ -277,7 +277,7 @@ class OidcProvider:
self._token_generator = token_generator
self._config = provider
- self._callback_url: str = hs.config.oidc_callback_url
+ self._callback_url: str = hs.config.oidc.oidc_callback_url
# Calculate the prefix for OIDC callback paths based on the public_baseurl.
# We'll insert this into the Path= parameter of any session cookies we set.
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index f06070bfcf..b23a1541bc 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -309,7 +309,7 @@ class ProfileHandler(BaseHandler):
async def on_profile_query(self, args: JsonDict) -> JsonDict:
"""Handles federation profile query requests."""
- if not self.hs.config.allow_profile_lookup_over_federation:
+ if not self.hs.config.federation.allow_profile_lookup_over_federation:
raise SynapseError(
403,
"Profile lookup over federation is disabled on this homeserver",
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index e56fa477bb..cdc36b8d25 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -465,8 +465,9 @@ class MatrixFederationHttpClient:
_sec_timeout = self.default_timeout
if (
- self.hs.config.federation_domain_whitelist is not None
- and request.destination not in self.hs.config.federation_domain_whitelist
+ self.hs.config.federation.federation_domain_whitelist is not None
+ and request.destination
+ not in self.hs.config.federation.federation_domain_whitelist
):
raise FederationDeniedError(request.destination)
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 065948f982..eac65572b2 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -73,7 +73,9 @@ class HttpPusher(Pusher):
self.failing_since = pusher_config.failing_since
self.timed_call: Optional[IDelayedCall] = None
self._is_processing = False
- self._group_unread_count_by_room = hs.config.push_group_unread_count_by_room
+ self._group_unread_count_by_room = (
+ hs.config.push.push_group_unread_count_by_room
+ )
self._pusherpool = hs.get_pusherpool()
self.data = pusher_config.data
diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py
index d766e98dce..64446fc486 100644
--- a/synapse/rest/client/login.py
+++ b/synapse/rest/client/login.py
@@ -69,16 +69,16 @@ class LoginRestServlet(RestServlet):
self.hs = hs
# JWT configuration variables.
- self.jwt_enabled = hs.config.jwt_enabled
- self.jwt_secret = hs.config.jwt_secret
- self.jwt_algorithm = hs.config.jwt_algorithm
- self.jwt_issuer = hs.config.jwt_issuer
- self.jwt_audiences = hs.config.jwt_audiences
+ self.jwt_enabled = hs.config.jwt.jwt_enabled
+ self.jwt_secret = hs.config.jwt.jwt_secret
+ self.jwt_algorithm = hs.config.jwt.jwt_algorithm
+ self.jwt_issuer = hs.config.jwt.jwt_issuer
+ self.jwt_audiences = hs.config.jwt.jwt_audiences
# SSO configuration.
self.saml2_enabled = hs.config.saml2_enabled
self.cas_enabled = hs.config.cas.cas_enabled
- self.oidc_enabled = hs.config.oidc_enabled
+ self.oidc_enabled = hs.config.oidc.oidc_enabled
self._msc2918_enabled = hs.config.access_token_lifetime is not None
self.auth = hs.get_auth()
diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py
index fc634a492d..3d2afacc50 100644
--- a/synapse/rest/consent/consent_resource.py
+++ b/synapse/rest/consent/consent_resource.py
@@ -100,13 +100,13 @@ class ConsentResource(DirectServeHtmlResource):
loader=loader, autoescape=jinja2.select_autoescape(["html", "htm", "xml"])
)
- if hs.config.form_secret is None:
+ if hs.config.key.form_secret is None:
raise ConfigError(
"Consent resource is enabled but form_secret is not set in "
"config file. It should be set to an arbitrary secret string."
)
- self._hmac_secret = hs.config.form_secret.encode("utf-8")
+ self._hmac_secret = hs.config.key.form_secret.encode("utf-8")
async def _async_render_GET(self, request: Request) -> None:
version = parse_string(request, "v", default=self._default_consent_version)
diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py
index ebe243bcfd..12b3ae120c 100644
--- a/synapse/rest/key/v2/local_key_resource.py
+++ b/synapse/rest/key/v2/local_key_resource.py
@@ -70,19 +70,19 @@ class LocalKey(Resource):
Resource.__init__(self)
def update_response_body(self, time_now_msec: int) -> None:
- refresh_interval = self.config.key_refresh_interval
+ refresh_interval = self.config.key.key_refresh_interval
self.valid_until_ts = int(time_now_msec + refresh_interval)
self.response_body = encode_canonical_json(self.response_json_object())
def response_json_object(self) -> JsonDict:
verify_keys = {}
- for key in self.config.signing_key:
+ for key in self.config.key.signing_key:
verify_key_bytes = key.verify_key.encode()
key_id = "%s:%s" % (key.alg, key.version)
verify_keys[key_id] = {"key": encode_base64(verify_key_bytes)}
old_verify_keys = {}
- for key_id, key in self.config.old_signing_keys.items():
+ for key_id, key in self.config.key.old_signing_keys.items():
verify_key_bytes = key.encode()
old_verify_keys[key_id] = {
"key": encode_base64(verify_key_bytes),
@@ -95,13 +95,13 @@ class LocalKey(Resource):
"verify_keys": verify_keys,
"old_verify_keys": old_verify_keys,
}
- for key in self.config.signing_key:
+ for key in self.config.key.signing_key:
json_object = sign_json(json_object, self.config.server.server_name, key)
return json_object
def render_GET(self, request: Request) -> int:
time_now = self.clock.time_msec()
# Update the expiry time if less than half the interval remains.
- if time_now + self.config.key_refresh_interval / 2 > self.valid_until_ts:
+ if time_now + self.config.key.key_refresh_interval / 2 > self.valid_until_ts:
self.update_response_body(time_now)
return respond_with_json_bytes(request, 200, self.response_body)
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index d8fd7938a4..c111a9d20f 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -97,7 +97,9 @@ class RemoteKey(DirectServeJsonResource):
self.fetcher = ServerKeyFetcher(hs)
self.store = hs.get_datastore()
self.clock = hs.get_clock()
- self.federation_domain_whitelist = hs.config.federation_domain_whitelist
+ self.federation_domain_whitelist = (
+ hs.config.federation.federation_domain_whitelist
+ )
self.config = hs.config
async def _async_render_GET(self, request: Request) -> None:
@@ -235,7 +237,7 @@ class RemoteKey(DirectServeJsonResource):
signed_keys = []
for key_json in json_results:
key_json = json_decoder.decode(key_json.decode("utf-8"))
- for signing_key in self.config.key_server_signing_keys:
+ for signing_key in self.config.key.key_server_signing_keys:
key_json = sign_json(
key_json, self.config.server.server_name, signing_key
)
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 50e4c9e29f..a30007a1e2 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -92,7 +92,9 @@ class MediaRepository:
self.recently_accessed_remotes: Set[Tuple[str, str]] = set()
self.recently_accessed_locals: Set[str] = set()
- self.federation_domain_whitelist = hs.config.federation_domain_whitelist
+ self.federation_domain_whitelist = (
+ hs.config.federation.federation_domain_whitelist
+ )
# List of StorageProviders where we should search for media and
# potentially upload to.
diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py
index 47a2f72b32..086c80b723 100644
--- a/synapse/rest/synapse/client/__init__.py
+++ b/synapse/rest/synapse/client/__init__.py
@@ -45,7 +45,7 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc
# provider-specific SSO bits. Only load these if they are enabled, since they
# rely on optional dependencies.
- if hs.config.oidc_enabled:
+ if hs.config.oidc.oidc_enabled:
from synapse.rest.synapse.client.oidc import OIDCResource
resources["/_synapse/client/oidc"] = OIDCResource(hs)
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index a4ec6bc328..ddb162a4fc 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -82,7 +82,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
if (
self.hs.config.worker.run_background_tasks
- and self.hs.config.metrics_flags.known_servers
+ and self.hs.config.metrics.metrics_flags.known_servers
):
self._known_servers_count = 1
self.hs.get_clock().looping_call(
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index f76fea4f66..8a4ef13054 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -217,7 +217,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
macaroon = pymacaroons.Macaroon(
location=self.hs.config.server_name,
identifier="key",
- key=self.hs.config.macaroon_secret_key,
+ key=self.hs.config.key.macaroon_secret_key,
)
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("type = access")
@@ -239,7 +239,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
macaroon = pymacaroons.Macaroon(
location=self.hs.config.server_name,
identifier="key",
- key=self.hs.config.macaroon_secret_key,
+ key=self.hs.config.key.macaroon_secret_key,
)
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("type = access")
diff --git a/tests/app/test_phone_stats_home.py b/tests/app/test_phone_stats_home.py
index d66aeb00eb..19eb4c79d0 100644
--- a/tests/app/test_phone_stats_home.py
+++ b/tests/app/test_phone_stats_home.py
@@ -172,7 +172,7 @@ class PhoneHomeR30V2TestCase(HomeserverTestCase):
# We don't want our tests to actually report statistics, so check
# that it's not enabled
- assert not hs.config.report_stats
+ assert not hs.config.metrics.report_stats
# This starts the needed data collection that we rely on to calculate
# R30v2 metrics.
diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index 903c69127d..ef6c2beec7 100644
--- a/tests/config/test_load.py
+++ b/tests/config/test_load.py
@@ -52,10 +52,10 @@ class ConfigLoadingTestCase(unittest.TestCase):
hasattr(config, "macaroon_secret_key"),
"Want config to have attr macaroon_secret_key",
)
- if len(config.macaroon_secret_key) < 5:
+ if len(config.key.macaroon_secret_key) < 5:
self.fail(
"Want macaroon secret key to be string of at least length 5,"
- "was: %r" % (config.macaroon_secret_key,)
+ "was: %r" % (config.key.macaroon_secret_key,)
)
config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
@@ -63,10 +63,10 @@ class ConfigLoadingTestCase(unittest.TestCase):
hasattr(config, "macaroon_secret_key"),
"Want config to have attr macaroon_secret_key",
)
- if len(config.macaroon_secret_key) < 5:
+ if len(config.key.macaroon_secret_key) < 5:
self.fail(
"Want macaroon secret key to be string of at least length 5,"
- "was: %r" % (config.macaroon_secret_key,)
+ "was: %r" % (config.key.macaroon_secret_key,)
)
def test_load_succeeds_if_macaroon_secret_key_missing(self):
@@ -101,7 +101,7 @@ class ConfigLoadingTestCase(unittest.TestCase):
# The default Metrics Flags are off by default.
config = HomeServerConfig.load_config("", ["-c", self.file])
- self.assertFalse(config.metrics_flags.known_servers)
+ self.assertFalse(config.metrics.metrics_flags.known_servers)
def generate_config(self):
with redirect_stdout(StringIO()):
diff --git a/tests/config/test_ratelimiting.py b/tests/config/test_ratelimiting.py
index 3c7bb32e07..1b63e1adfd 100644
--- a/tests/config/test_ratelimiting.py
+++ b/tests/config/test_ratelimiting.py
@@ -30,7 +30,7 @@ class RatelimitConfigTestCase(TestCase):
config = HomeServerConfig()
config.parse_config_dict(config_dict, "", "")
- config_obj = config.rc_federation
+ config_obj = config.ratelimiting.rc_federation
self.assertEqual(config_obj.window_size, 20000)
self.assertEqual(config_obj.sleep_limit, 693)
diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py
index 5f3350e490..12857053e7 100644
--- a/tests/handlers/test_auth.py
+++ b/tests/handlers/test_auth.py
@@ -67,7 +67,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
v.satisfy_general(verify_type)
v.satisfy_general(verify_nonce)
v.satisfy_general(verify_guest)
- v.verify(macaroon, self.hs.config.macaroon_secret_key)
+ v.verify(macaroon, self.hs.config.key.macaroon_secret_key)
def test_short_term_login_token_gives_user_id(self):
token = self.macaroon_generator.generate_short_term_login_token(
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index e9fd991718..c7555c26db 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -328,7 +328,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
# Set up TCP replication between master and the new worker if we don't
# have Redis support enabled.
- if not worker_hs.config.redis_enabled:
+ if not worker_hs.config.redis.redis_enabled:
repl_handler = ReplicationCommandHandler(worker_hs)
client = ClientReplicationStreamProtocol(
worker_hs,
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index 414c8781a9..371615a015 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -815,9 +815,9 @@ class JWTTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor, clock):
self.hs = self.setup_test_homeserver()
- self.hs.config.jwt_enabled = True
- self.hs.config.jwt_secret = self.jwt_secret
- self.hs.config.jwt_algorithm = self.jwt_algorithm
+ self.hs.config.jwt.jwt_enabled = True
+ self.hs.config.jwt.jwt_secret = self.jwt_secret
+ self.hs.config.jwt.jwt_algorithm = self.jwt_algorithm
return self.hs
def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_secret) -> str:
@@ -1023,9 +1023,9 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor, clock):
self.hs = self.setup_test_homeserver()
- self.hs.config.jwt_enabled = True
- self.hs.config.jwt_secret = self.jwt_pubkey
- self.hs.config.jwt_algorithm = "RS256"
+ self.hs.config.jwt.jwt_enabled = True
+ self.hs.config.jwt.jwt_secret = self.jwt_pubkey
+ self.hs.config.jwt.jwt_algorithm = "RS256"
return self.hs
def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_privatekey) -> str:
diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py
index 9f3ab2c985..72a5a11b46 100644
--- a/tests/rest/client/test_register.py
+++ b/tests/rest/client/test_register.py
@@ -146,7 +146,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
self.assertEquals(channel.json_body["errcode"], "M_FORBIDDEN")
def test_POST_guest_registration(self):
- self.hs.config.macaroon_secret_key = "test"
+ self.hs.config.key.macaroon_secret_key = "test"
self.hs.config.allow_guest_access = True
channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index ebadf47948..cf9748f218 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -513,7 +513,6 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase):
hs.config.appservice.app_service_config_files = [f1, f2]
hs.config.caches.event_cache_size = 1
- hs.config.password_providers = []
with self.assertRaises(ConfigError) as cm:
database = hs.get_datastores().databases[0]
diff --git a/tests/util/test_ratelimitutils.py b/tests/util/test_ratelimitutils.py
index 34aaffe859..89d8656634 100644
--- a/tests/util/test_ratelimitutils.py
+++ b/tests/util/test_ratelimitutils.py
@@ -95,4 +95,4 @@ def build_rc_config(settings: Optional[dict] = None):
config_dict.update(settings or {})
config = HomeServerConfig()
config.parse_config_dict(config_dict, "", "")
- return config.rc_federation
+ return config.ratelimiting.rc_federation
--
cgit 1.5.1
From a7304adc7d383caad1b3f83fa707b1090323ecca Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 23 Sep 2021 17:34:33 +0100
Subject: Factor out `_get_remote_auth_chain_for_event` from
`_update_auth_events_and_context_for_auth` (#10884)
* Reload auth events from db after fetching and persisting
In `_update_auth_events_and_context_for_auth`, when we fetch the remote auth
tree and persist the returned events, we now load the missing events from the
database rather than using the copies we got from the remote server.
This is mostly in preparation for additional refactors, but it does have one
advantage: if we later get around to checking the rejected status, we'll be
able to make use of it.
* Factor out `_get_remote_auth_chain_for_event` from `_update_auth_events_and_context_for_auth`
* changelog
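The reload step boils down to re-keying the persisted events by
`(type, state_key)`. A sketch under the same assumptions as the diff below
(`get_events_as_list` and `EventBase` are the existing storage API and event
type; the helper name is illustrative):

    from typing import Collection, Dict, Tuple

    async def _reload_persisted_auth_events(
        store,
        auth_events: Dict[Tuple[str, str], "EventBase"],
        missing_auth: Collection[str],
    ) -> None:
        # Read our own copies back from the database; this has the
        # side-effect of correctly setting rejected_reason on them.
        persisted = await store.get_events_as_list(missing_auth, allow_rejected=True)
        auth_events.update({(ev.type, ev.state_key): ev for ev in persisted})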
---
changelog.d/10884.misc | 1 +
synapse/handlers/federation_event.py | 124 ++++++++++++++++++++---------------
2 files changed, 73 insertions(+), 52 deletions(-)
create mode 100644 changelog.d/10884.misc
diff --git a/changelog.d/10884.misc b/changelog.d/10884.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/10884.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 10b3fdc222..7d468bd2df 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -1505,61 +1505,22 @@ class FederationEventHandler:
# If we don't have all the auth events, we need to get them.
logger.info("auth_events contains unknown events: %s", missing_auth)
try:
- try:
- remote_auth_chain = await self._federation_client.get_event_auth(
- origin, event.room_id, event.event_id
- )
- except RequestSendFailed as e1:
- # The other side isn't around or doesn't implement the
- # endpoint, so lets just bail out.
- logger.info("Failed to get event auth from remote: %s", e1)
- return context, auth_events
-
- seen_remotes = await self._store.have_seen_events(
- event.room_id, [e.event_id for e in remote_auth_chain]
+ await self._get_remote_auth_chain_for_event(
+ origin, event.room_id, event.event_id
)
-
- for auth_event in remote_auth_chain:
- if auth_event.event_id in seen_remotes:
- continue
-
- if auth_event.event_id == event.event_id:
- continue
-
- try:
- auth_ids = auth_event.auth_event_ids()
- auth = {
- (e.type, e.state_key): e
- for e in remote_auth_chain
- if e.event_id in auth_ids or e.type == EventTypes.Create
- }
- auth_event.internal_metadata.outlier = True
-
- logger.debug(
- "_check_event_auth %s missing_auth: %s",
- event.event_id,
- auth_event.event_id,
- )
- missing_auth_event_context = EventContext.for_outlier()
- missing_auth_event_context = await self._check_event_auth(
- origin,
- auth_event,
- missing_auth_event_context,
- claimed_auth_event_map=auth,
- )
- await self.persist_events_and_notify(
- event.room_id, [(auth_event, missing_auth_event_context)]
- )
-
- if auth_event.event_id in event_auth_events:
- auth_events[
- (auth_event.type, auth_event.state_key)
- ] = auth_event
- except AuthError:
- pass
-
except Exception:
logger.exception("Failed to get auth chain")
+ else:
+ # load any auth events we might have persisted from the database. This
+ # has the side-effect of correctly setting the rejected_reason on them.
+ auth_events.update(
+ {
+ (ae.type, ae.state_key): ae
+ for ae in await self._store.get_events_as_list(
+ missing_auth, allow_rejected=True
+ )
+ }
+ )
if event.internal_metadata.is_outlier():
# XXX: given that, for an outlier, we'll be working with the
@@ -1633,6 +1594,65 @@ class FederationEventHandler:
return context, auth_events
+ async def _get_remote_auth_chain_for_event(
+ self, destination: str, room_id: str, event_id: str
+ ) -> None:
+ """If we are missing some of an event's auth events, attempt to request them
+
+ Args:
+ destination: where to fetch the auth tree from
+ room_id: the room in which we are lacking auth events
+ event_id: the event for which we are lacking auth events
+ """
+ try:
+ remote_auth_chain = await self._federation_client.get_event_auth(
+ destination, room_id, event_id
+ )
+ except RequestSendFailed as e1:
+ # The other side isn't around or doesn't implement the
+ # endpoint, so lets just bail out.
+ logger.info("Failed to get event auth from remote: %s", e1)
+ return
+
+ seen_remotes = await self._store.have_seen_events(
+ room_id, [e.event_id for e in remote_auth_chain]
+ )
+
+ for auth_event in remote_auth_chain:
+ if auth_event.event_id in seen_remotes:
+ continue
+
+ if auth_event.event_id == event_id:
+ continue
+
+ try:
+ auth_ids = auth_event.auth_event_ids()
+ auth = {
+ (e.type, e.state_key): e
+ for e in remote_auth_chain
+ if e.event_id in auth_ids or e.type == EventTypes.Create
+ }
+ auth_event.internal_metadata.outlier = True
+
+ logger.debug(
+ "_check_event_auth %s missing_auth: %s",
+ event_id,
+ auth_event.event_id,
+ )
+ missing_auth_event_context = EventContext.for_outlier()
+ missing_auth_event_context = await self._check_event_auth(
+ destination,
+ auth_event,
+ missing_auth_event_context,
+ claimed_auth_event_map=auth,
+ )
+ await self.persist_events_and_notify(
+ room_id,
+ [(auth_event, missing_auth_event_context)],
+ )
+ except AuthError:
+ pass
+
async def _update_context_for_auth_events(
self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
) -> EventContext:
--
cgit 1.5.1
From 90d9fc750514b1ede327f1dfe6e0a1c09b281d6d Mon Sep 17 00:00:00 2001
From: Callum Brown
Date: Thu, 23 Sep 2021 18:58:12 +0100
Subject: Allow `.` and `~` chars in registration tokens (#10887)
Per updates to MSC3231 in order to use the same grammar
as other identifiers.
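For illustration, a minimal sketch of the widened check; only the character string mirrors the actual change, the `validate_token` helper itself is hypothetical:

```python
import string

# Mirrors the new allowed_chars in NewRegistrationTokenRestServlet.
ALLOWED_CHARS = set(string.ascii_letters + string.digits + "._~-")


def validate_token(token: str) -> bool:
    """Return True if the token only uses characters MSC3231 permits."""
    return bool(token) and all(c in ALLOWED_CHARS for c in token)


assert validate_token("abcd")
assert validate_token("a.b~c-d_e")  # '.' and '~' are now accepted
assert not validate_token("a b")    # whitespace is still rejected
```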
---
changelog.d/10887.bugfix | 1 +
synapse/rest/admin/registration_tokens.py | 2 +-
tests/rest/admin/test_registration_tokens.py | 8 +++++---
3 files changed, 7 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/10887.bugfix
diff --git a/changelog.d/10887.bugfix b/changelog.d/10887.bugfix
new file mode 100644
index 0000000000..2d1f67489a
--- /dev/null
+++ b/changelog.d/10887.bugfix
@@ -0,0 +1 @@
+Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231).
diff --git a/synapse/rest/admin/registration_tokens.py b/synapse/rest/admin/registration_tokens.py
index 5a1c929d85..aba48f6e7b 100644
--- a/synapse/rest/admin/registration_tokens.py
+++ b/synapse/rest/admin/registration_tokens.py
@@ -113,7 +113,7 @@ class NewRegistrationTokenRestServlet(RestServlet):
self.store = hs.get_datastore()
self.clock = hs.get_clock()
# A string of all the characters allowed to be in a registration_token
- self.allowed_chars = string.ascii_letters + string.digits + "-_"
+ self.allowed_chars = string.ascii_letters + string.digits + "._~-"
self.allowed_chars_set = set(self.allowed_chars)
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
diff --git a/tests/rest/admin/test_registration_tokens.py b/tests/rest/admin/test_registration_tokens.py
index 4927321e5a..9bac423ae0 100644
--- a/tests/rest/admin/test_registration_tokens.py
+++ b/tests/rest/admin/test_registration_tokens.py
@@ -95,8 +95,10 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
def test_create_specifying_fields(self):
"""Create a token specifying the value of all fields."""
+ # As many of the allowed characters as possible with length <= 64
+ token = "adefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._~-"
data = {
- "token": "abcd",
+ "token": token,
"uses_allowed": 1,
"expiry_time": self.clock.time_msec() + 1000000,
}
@@ -109,7 +111,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
- self.assertEqual(channel.json_body["token"], "abcd")
+ self.assertEqual(channel.json_body["token"], token)
self.assertEqual(channel.json_body["uses_allowed"], 1)
self.assertEqual(channel.json_body["expiry_time"], data["expiry_time"])
self.assertEqual(channel.json_body["pending"], 0)
@@ -193,7 +195,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
"""Check right error is raised when server can't generate unique token."""
# Create all possible single character tokens
tokens = []
- for c in string.ascii_letters + string.digits + "-_":
+ for c in string.ascii_letters + string.digits + "._~-":
tokens.append(
{
"token": c,
--
cgit 1.5.1
From e704cc2a48c6adc5d3da79a49ed02961edfc3b4a Mon Sep 17 00:00:00 2001
From: Kokokokoka
Date: Fri, 24 Sep 2021 12:19:51 +0300
Subject: In `_purge_history_txn`, ensure that txn.fetchall has elements before
accessing rows (#10690)
This change adds a check that rows exist before accessing a row element; this should fix issue #10669
Signed-off-by: Vasya Boytsov vasiliy.boytsov@phystech.edu
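A self-contained sketch of the guard, with an in-memory SQLite cursor standing in for the real transaction; the point is that `max()` over an empty sequence raises `ValueError`, so the rows must be checked first:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
txn = conn.cursor()
txn.execute("CREATE TABLE event_forward_extremities (event_id TEXT, depth INTEGER)")

txn.execute("SELECT event_id, depth FROM event_forward_extremities")
rows = txn.fetchall()

# If the forward extremities were already cleared out, rows is empty and
# max() would raise "ValueError: max() arg is an empty sequence"; with the
# guard we simply carry on, as the purge code now does.
if rows:
    max_depth = max(row[1] for row in rows)
    print("max depth:", max_depth)
else:
    print("no forward extremities; carrying on")
```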
---
changelog.d/10690.bugfix | 1 +
synapse/storage/databases/main/purge_events.py | 22 +++++++++++++---------
2 files changed, 14 insertions(+), 9 deletions(-)
create mode 100644 changelog.d/10690.bugfix
diff --git a/changelog.d/10690.bugfix b/changelog.d/10690.bugfix
new file mode 100644
index 0000000000..059eea7464
--- /dev/null
+++ b/changelog.d/10690.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug that caused an `AssertionError` when purging history in certain rooms. Contributed by @Kokokokoka.
diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
index bccff5e5b9..3eb30944bf 100644
--- a/synapse/storage/databases/main/purge_events.py
+++ b/synapse/storage/databases/main/purge_events.py
@@ -102,15 +102,19 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
(room_id,),
)
rows = txn.fetchall()
- max_depth = max(row[1] for row in rows)
-
- if max_depth < token.topological:
- # We need to ensure we don't delete all the events from the database
- # otherwise we wouldn't be able to send any events (due to not
- # having any backwards extremities)
- raise SynapseError(
- 400, "topological_ordering is greater than forward extremeties"
- )
+ # if we already have no forwards extremities (for example because they were
+ # cleared out by the `delete_old_current_state_events` background database
+ # update), then we may as well carry on.
+ if rows:
+ max_depth = max(row[1] for row in rows)
+
+ if max_depth < token.topological:
+ # We need to ensure we don't delete all the events from the database
+ # otherwise we wouldn't be able to send any events (due to not
+ # having any backwards extremities)
+ raise SynapseError(
+ 400, "topological_ordering is greater than forward extremities"
+ )
logger.info("[purge] looking for events to delete")
--
cgit 1.5.1
From 7f3352743e02e0d02ec00eb3a50fd0ceb422286c Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Fri, 24 Sep 2021 10:38:22 +0100
Subject: Improve typing in user_directory files (#10891)
* Improve typing in user_directory files
This makes the user_directory.py in storage pass most of mypy's
checks (including `no-untyped-defs`). Unfortunately that file is in the
tangled web of Store class inheritance, so it doesn't pass mypy at the moment.
The handlers directory has already been mypyed.
Co-authored-by: reivilibre
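Two of the recurring typing patterns in this change, sketched against a plain `sqlite3` cursor standing in for `LoggingTransaction` (the table and queries are invented for the example):

```python
import sqlite3
from typing import List, Tuple, cast

conn = sqlite3.connect(":memory:")
txn = conn.cursor()
txn.execute("CREATE TABLE rooms (room_id TEXT, events INTEGER)")
txn.execute("INSERT INTO rooms VALUES ('!a:example.org', 3)")

# Pattern 1: fetchone() can return None as far as the type checker knows,
# so assert before subscripting instead of writing txn.fetchone()[0].
txn.execute("SELECT COUNT(*) FROM rooms")
result = txn.fetchone()
assert result is not None
remaining: int = result[0]

# Pattern 2: fetchall() rows are untyped; cast() to the shape the query
# actually selects so the rest of the function is checked against it.
txn.execute("SELECT room_id, events FROM rooms")
rooms_to_work_on = cast(List[Tuple[str, int]], txn.fetchall())
print(remaining, rooms_to_work_on)
```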
---
changelog.d/10891.misc | 1 +
mypy.ini | 2 +
synapse/storage/databases/main/user_directory.py | 124 ++++++++++++++++-------
tests/handlers/test_user_directory.py | 5 +-
4 files changed, 95 insertions(+), 37 deletions(-)
create mode 100644 changelog.d/10891.misc
diff --git a/changelog.d/10891.misc b/changelog.d/10891.misc
new file mode 100644
index 0000000000..6eecea4065
--- /dev/null
+++ b/changelog.d/10891.misc
@@ -0,0 +1 @@
+Improve type hinting in the user directory code.
\ No newline at end of file
diff --git a/mypy.ini b/mypy.ini
index 3cb6cecd7e..437d0a46a5 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -85,9 +85,11 @@ files =
tests/handlers/test_room_summary.py,
tests/handlers/test_send_email.py,
tests/handlers/test_sync.py,
+ tests/handlers/test_user_directory.py,
tests/rest/client/test_login.py,
tests/rest/client/test_auth.py,
tests/storage/test_state.py,
+ tests/storage/test_user_directory.py,
tests/util/test_itertools.py,
tests/util/test_stream_change_cache.py
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 718f3e9976..7ca04237a5 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -14,14 +14,28 @@
import logging
import re
-from typing import Any, Dict, Iterable, Optional, Set, Tuple
+from typing import (
+ TYPE_CHECKING,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+ cast,
+)
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules
-from synapse.storage.database import DatabasePool
+from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.state import StateFilter
from synapse.storage.databases.main.state_deltas import StateDeltasStore
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
-from synapse.types import get_domain_from_id, get_localpart_from_id
+from synapse.storage.types import Connection
+from synapse.types import JsonDict, get_domain_from_id, get_localpart_from_id
from synapse.util.caches.descriptors import cached
logger = logging.getLogger(__name__)
@@ -36,7 +50,12 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
# add_users_who_share_private_rooms?
SHARE_PRIVATE_WORKING_SET = 500
- def __init__(self, database: DatabasePool, db_conn, hs):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: Connection,
+ hs: "HomeServer",
+ ):
super().__init__(database, db_conn, hs)
self.server_name = hs.hostname
@@ -57,10 +76,12 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
"populate_user_directory_cleanup", self._populate_user_directory_cleanup
)
- async def _populate_user_directory_createtables(self, progress, batch_size):
+ async def _populate_user_directory_createtables(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
# Get all the rooms that we want to process.
- def _make_staging_area(txn):
+ def _make_staging_area(txn: LoggingTransaction) -> None:
sql = (
"CREATE TABLE IF NOT EXISTS "
+ TEMP_TABLE
@@ -110,16 +131,20 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
)
return 1
- async def _populate_user_directory_cleanup(self, progress, batch_size):
+ async def _populate_user_directory_cleanup(
+ self,
+ progress: JsonDict,
+ batch_size: int,
+ ) -> int:
"""
Update the user directory stream position, then clean up the old tables.
"""
position = await self.db_pool.simple_select_one_onecol(
- TEMP_TABLE + "_position", None, "position"
+ TEMP_TABLE + "_position", {}, "position"
)
await self.update_user_directory_stream_pos(position)
- def _delete_staging_area(txn):
+ def _delete_staging_area(txn: LoggingTransaction) -> None:
txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_rooms")
txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_users")
txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_position")
@@ -133,18 +158,32 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
)
return 1
- async def _populate_user_directory_process_rooms(self, progress, batch_size):
+ async def _populate_user_directory_process_rooms(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
"""
+ Rescan the state of all rooms so we can track
+
+ - who's in a public room;
+ - which local users share a private room with other users (local
+ and remote); and
+ - who should be in the user_directory.
+
Args:
progress (dict)
batch_size (int): Maximum number of state events to process
per cycle.
+
+ Returns:
+ number of events processed.
"""
# If we don't have progress filed, delete everything.
if not progress:
await self.delete_all_from_user_dir()
- def _get_next_batch(txn):
+ def _get_next_batch(
+ txn: LoggingTransaction,
+ ) -> Optional[Sequence[Tuple[str, int]]]:
# Only fetch 250 rooms, so we don't fetch too many at once, even
# if those 250 rooms have less than batch_size state events.
sql = """
@@ -155,7 +194,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
TEMP_TABLE + "_rooms",
)
txn.execute(sql)
- rooms_to_work_on = txn.fetchall()
+ rooms_to_work_on = cast(List[Tuple[str, int]], txn.fetchall())
if not rooms_to_work_on:
return None
@@ -163,7 +202,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
# Get how many are left to process, so we can give status on how
# far we are in processing
txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms")
- progress["remaining"] = txn.fetchone()[0]
+ result = txn.fetchone()
+ assert result is not None
+ progress["remaining"] = result[0]
return rooms_to_work_on
@@ -261,29 +302,33 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
return processed_event_count
- async def _populate_user_directory_process_users(self, progress, batch_size):
+ async def _populate_user_directory_process_users(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
"""
Add all local users to the user directory.
"""
- def _get_next_batch(txn):
+ def _get_next_batch(txn: LoggingTransaction) -> Optional[List[str]]:
sql = "SELECT user_id FROM %s LIMIT %s" % (
TEMP_TABLE + "_users",
str(batch_size),
)
txn.execute(sql)
- users_to_work_on = txn.fetchall()
+ user_result = cast(List[Tuple[str]], txn.fetchall())
- if not users_to_work_on:
+ if not user_result:
return None
- users_to_work_on = [x[0] for x in users_to_work_on]
+ users_to_work_on = [x[0] for x in user_result]
# Get how many are left to process, so we can give status on how
# far we are in processing
sql = "SELECT COUNT(*) FROM " + TEMP_TABLE + "_users"
txn.execute(sql)
- progress["remaining"] = txn.fetchone()[0]
+ count_result = txn.fetchone()
+ assert count_result is not None
+ progress["remaining"] = count_result[0]
return users_to_work_on
@@ -324,7 +369,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
return len(users_to_work_on)
- async def is_room_world_readable_or_publicly_joinable(self, room_id):
+ async def is_room_world_readable_or_publicly_joinable(self, room_id: str) -> bool:
"""Check if the room is either world_readable or publically joinable"""
# Create a state filter that only queries join and history state event
@@ -368,7 +413,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
if not isinstance(avatar_url, str):
avatar_url = None
- def _update_profile_in_user_dir_txn(txn):
+ def _update_profile_in_user_dir_txn(txn: LoggingTransaction) -> None:
self.db_pool.simple_upsert_txn(
txn,
table="user_directory",
@@ -435,7 +480,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
for user_id, other_user_id in user_id_tuples
],
value_names=(),
- value_values=None,
+ value_values=(),
desc="add_users_who_share_room",
)
@@ -454,14 +499,14 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
key_names=["user_id", "room_id"],
key_values=[(user_id, room_id) for user_id in user_ids],
value_names=(),
- value_values=None,
+ value_values=(),
desc="add_users_in_public_rooms",
)
async def delete_all_from_user_dir(self) -> None:
"""Delete the entire user directory"""
- def _delete_all_from_user_dir_txn(txn):
+ def _delete_all_from_user_dir_txn(txn: LoggingTransaction) -> None:
txn.execute("DELETE FROM user_directory")
txn.execute("DELETE FROM user_directory_search")
txn.execute("DELETE FROM users_in_public_rooms")
@@ -473,7 +518,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
)
@cached()
- async def get_user_in_directory(self, user_id: str) -> Optional[Dict[str, Any]]:
+ async def get_user_in_directory(self, user_id: str) -> Optional[Dict[str, str]]:
return await self.db_pool.simple_select_one(
table="user_directory",
keyvalues={"user_id": user_id},
@@ -497,7 +542,12 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
# add_users_who_share_private_rooms?
SHARE_PRIVATE_WORKING_SET = 500
- def __init__(self, database: DatabasePool, db_conn, hs):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: Connection,
+ hs: "HomeServer",
+ ) -> None:
super().__init__(database, db_conn, hs)
self._prefer_local_users_in_search = (
@@ -506,7 +556,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
self._server_name = hs.config.server.server_name
async def remove_from_user_dir(self, user_id: str) -> None:
- def _remove_from_user_dir_txn(txn):
+ def _remove_from_user_dir_txn(txn: LoggingTransaction) -> None:
self.db_pool.simple_delete_txn(
txn, table="user_directory", keyvalues={"user_id": user_id}
)
@@ -532,7 +582,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
"remove_from_user_dir", _remove_from_user_dir_txn
)
- async def get_users_in_dir_due_to_room(self, room_id):
+ async def get_users_in_dir_due_to_room(self, room_id: str) -> Set[str]:
"""Get all user_ids that are in the room directory because they're
in the given room_id
"""
@@ -565,7 +615,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
room_id
"""
- def _remove_user_who_share_room_txn(txn):
+ def _remove_user_who_share_room_txn(txn: LoggingTransaction) -> None:
self.db_pool.simple_delete_txn(
txn,
table="users_who_share_private_rooms",
@@ -586,7 +636,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
"remove_user_who_share_room", _remove_user_who_share_room_txn
)
- async def get_user_dir_rooms_user_is_in(self, user_id):
+ async def get_user_dir_rooms_user_is_in(self, user_id: str) -> List[str]:
"""
Returns the rooms that a user is in.
@@ -628,7 +678,9 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
A set of room ID's that the users share.
"""
- def _get_shared_rooms_for_users_txn(txn):
+ def _get_shared_rooms_for_users_txn(
+ txn: LoggingTransaction,
+ ) -> List[Dict[str, str]]:
txn.execute(
"""
SELECT p1.room_id
@@ -669,7 +721,9 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
desc="get_user_directory_stream_pos",
)
- async def search_user_dir(self, user_id, search_term, limit):
+ async def search_user_dir(
+ self, user_id: str, search_term: str, limit: int
+ ) -> JsonDict:
"""Searches for users in directory
Returns:
@@ -705,7 +759,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
# We allow manipulating the ranking algorithm by injecting statements
# based on config options.
additional_ordering_statements = []
- ordering_arguments = ()
+ ordering_arguments: Tuple[str, ...] = ()
if isinstance(self.database_engine, PostgresEngine):
full_query, exact_query, prefix_query = _parse_query_postgres(search_term)
@@ -811,7 +865,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
return {"limited": limited, "results": results}
-def _parse_query_sqlite(search_term):
+def _parse_query_sqlite(search_term: str) -> str:
"""Takes a plain unicode string from the user and converts it into a form
that can be passed to database.
We use this so that we can add prefix matching, which isn't something
@@ -826,7 +880,7 @@ def _parse_query_sqlite(search_term):
return " & ".join("(%s* OR %s)" % (result, result) for result in results)
-def _parse_query_postgres(search_term):
+def _parse_query_postgres(search_term: str) -> Tuple[str, str, str]:
"""Takes a plain unicode string from the user and converts it into a form
that can be passed to database.
We use this so that we can add prefix matching, which isn't something
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index f3684c34a2..ba32585a14 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import List, Tuple
from unittest.mock import Mock
from urllib.parse import quote
@@ -325,7 +326,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
r.add((i["user_id"], i["other_user_id"], i["room_id"]))
return r
- def get_users_in_public_rooms(self):
+ def get_users_in_public_rooms(self) -> List[Tuple[str, str]]:
r = self.get_success(
self.store.db_pool.simple_select_list(
"users_in_public_rooms", None, ("user_id", "room_id")
@@ -336,7 +337,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
retval.append((i["user_id"], i["room_id"]))
return retval
- def get_users_who_share_private_rooms(self):
+ def get_users_who_share_private_rooms(self) -> List[Tuple[str, str, str]]:
return self.get_success(
self.store.db_pool.simple_select_list(
"users_who_share_private_rooms",
--
cgit 1.5.1
From fa7453638408c2c55fade2d20dba362ff23226e5 Mon Sep 17 00:00:00 2001
From: Jason Robinson
Date: Fri, 24 Sep 2021 12:41:18 +0300
Subject: Fix AuthBlocking check when requester is appservice (#10881)
If the MAU count had been reached, Synapse incorrectly blocked appservice users even when they were explicitly configured not to be tracked (the default). This was because the relevant `if` was chained as an `elif` behind an earlier `if`, so it was bypassed whenever the earlier branch matched.
Signed-off-by: Jason Robinson
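A stripped-down illustration of the control-flow pitfall described above, using hypothetical condition names rather than Synapse's real checks:

```python
def blocked_buggy(first_check_matches: bool, is_exempt_appservice: bool) -> bool:
    if first_check_matches:
        pass  # the earlier branch "hits" but does not settle the question
    elif is_exempt_appservice:
        return False  # exemption: never reached once the first branch matched
    return True  # fall through to MAU blocking


def blocked_fixed(first_check_matches: bool, is_exempt_appservice: bool) -> bool:
    if first_check_matches:
        pass
    if is_exempt_appservice:  # an independent `if` is always consulted
        return False
    return True


assert blocked_buggy(True, True) is True   # appservice user wrongly blocked
assert blocked_fixed(True, True) is False  # exemption now applies
```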
---
changelog.d/10881.bugfix | 1 +
synapse/api/auth_blocking.py | 2 +-
tests/api/test_auth.py | 62 ++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 64 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/10881.bugfix
diff --git a/changelog.d/10881.bugfix b/changelog.d/10881.bugfix
new file mode 100644
index 0000000000..0a8905cc46
--- /dev/null
+++ b/changelog.d/10881.bugfix
@@ -0,0 +1 @@
+Fix application service users being subject to MAU blocking if MAU had been reached, even if configured not to be blocked.
diff --git a/synapse/api/auth_blocking.py b/synapse/api/auth_blocking.py
index a3b95f4de0..08fe160c98 100644
--- a/synapse/api/auth_blocking.py
+++ b/synapse/api/auth_blocking.py
@@ -81,7 +81,7 @@ class AuthBlocking:
# We never block the server from doing actions on behalf of
# users.
return
- elif requester.app_service and not self._track_appservice_user_ips:
+ if requester.app_service and not self._track_appservice_user_ips:
# If we're authenticated as an appservice then we only block
# auth if `track_appservice_user_ips` is set, as that option
# implicitly means that application services are part of MAU
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index 8a4ef13054..cccff7af26 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -25,7 +25,9 @@ from synapse.api.errors import (
MissingClientTokenError,
ResourceLimitError,
)
+from synapse.appservice import ApplicationService
from synapse.storage.databases.main.registration import TokenLookupResult
+from synapse.types import Requester
from tests import unittest
from tests.test_utils import simple_async_mock
@@ -290,6 +292,66 @@ class AuthTestCase(unittest.HomeserverTestCase):
# Real users not allowed
self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError)
+ def test_blocking_mau__appservice_requester_allowed_when_not_tracking_ips(self):
+ self.auth_blocking._max_mau_value = 50
+ self.auth_blocking._limit_usage_by_mau = True
+ self.auth_blocking._track_appservice_user_ips = False
+
+ self.store.get_monthly_active_count = simple_async_mock(100)
+ self.store.user_last_seen_monthly_active = simple_async_mock()
+ self.store.is_trial_user = simple_async_mock()
+
+ appservice = ApplicationService(
+ "abcd",
+ self.hs.config.server_name,
+ id="1234",
+ namespaces={
+ "users": [{"regex": "@_appservice.*:sender", "exclusive": True}]
+ },
+ sender="@appservice:sender",
+ )
+ requester = Requester(
+ user="@appservice:server",
+ access_token_id=None,
+ device_id="FOOBAR",
+ is_guest=False,
+ shadow_banned=False,
+ app_service=appservice,
+ authenticated_entity="@appservice:server",
+ )
+ self.get_success(self.auth.check_auth_blocking(requester=requester))
+
+ def test_blocking_mau__appservice_requester_disallowed_when_tracking_ips(self):
+ self.auth_blocking._max_mau_value = 50
+ self.auth_blocking._limit_usage_by_mau = True
+ self.auth_blocking._track_appservice_user_ips = True
+
+ self.store.get_monthly_active_count = simple_async_mock(100)
+ self.store.user_last_seen_monthly_active = simple_async_mock()
+ self.store.is_trial_user = simple_async_mock()
+
+ appservice = ApplicationService(
+ "abcd",
+ self.hs.config.server_name,
+ id="1234",
+ namespaces={
+ "users": [{"regex": "@_appservice.*:sender", "exclusive": True}]
+ },
+ sender="@appservice:sender",
+ )
+ requester = Requester(
+ user="@appservice:server",
+ access_token_id=None,
+ device_id="FOOBAR",
+ is_guest=False,
+ shadow_banned=False,
+ app_service=appservice,
+ authenticated_entity="@appservice:server",
+ )
+ self.get_failure(
+ self.auth.check_auth_blocking(requester=requester), ResourceLimitError
+ )
+
def test_reserved_threepid(self):
self.auth_blocking._limit_usage_by_mau = True
self.auth_blocking._max_mau_value = 1
--
cgit 1.5.1
From 50022cff966a3991fbd8a1e5c98f490d9b335442 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 24 Sep 2021 11:01:25 +0100
Subject: Add reactor to `SynapseRequest` and fix up types. (#10868)
---
changelog.d/10868.feature | 1 +
synapse/http/server.py | 4 +--
synapse/http/site.py | 37 +++++++++++++++--------
synapse/rest/key/v2/remote_key_resource.py | 9 +++---
synapse/rest/media/v1/_base.py | 7 +++--
synapse/rest/media/v1/config_resource.py | 4 +--
synapse/rest/media/v1/download_resource.py | 5 ++--
synapse/rest/media/v1/media_repository.py | 10 +++++--
synapse/rest/media/v1/preview_url_resource.py | 3 +-
synapse/rest/media/v1/thumbnail_resource.py | 15 +++++-----
synapse/rest/media/v1/upload_resource.py | 4 +--
tests/http/test_additional_resource.py | 8 +++--
tests/logging/test_terse_json.py | 3 +-
tests/replication/test_multi_media_repo.py | 2 +-
tests/rest/admin/test_admin.py | 6 ++--
tests/rest/admin/test_media.py | 6 ++--
tests/rest/admin/test_user.py | 2 +-
tests/rest/client/test_account.py | 4 +--
tests/rest/client/test_consent.py | 12 +++++---
tests/rest/client/utils.py | 2 +-
tests/rest/key/v2/test_remote_key_resource.py | 4 +--
tests/rest/media/v1/test_media_storage.py | 8 ++---
tests/server.py | 6 ++--
tests/test_server.py | 43 ++++++++++++++++++++-------
24 files changed, 123 insertions(+), 82 deletions(-)
create mode 100644 changelog.d/10868.feature
diff --git a/changelog.d/10868.feature b/changelog.d/10868.feature
new file mode 100644
index 0000000000..07e7b2c6a7
--- /dev/null
+++ b/changelog.d/10868.feature
@@ -0,0 +1 @@
+Speed up responding with large JSON objects to requests.
diff --git a/synapse/http/server.py b/synapse/http/server.py
index b79fa722e9..e28b56abb9 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -320,7 +320,7 @@ class DirectServeJsonResource(_AsyncResource):
def _send_response(
self,
- request: Request,
+ request: SynapseRequest,
code: int,
response_object: Any,
):
@@ -629,7 +629,7 @@ def _encode_json_bytes(json_object: Any) -> Iterator[bytes]:
def respond_with_json(
- request: Request,
+ request: SynapseRequest,
code: int,
json_object: Any,
send_cors: bool = False,
diff --git a/synapse/http/site.py b/synapse/http/site.py
index dd4c749e16..755ad56637 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -14,13 +14,14 @@
import contextlib
import logging
import time
-from typing import Optional, Tuple, Union
+from typing import Generator, Optional, Tuple, Union
import attr
from zope.interface import implementer
from twisted.internet.interfaces import IAddress, IReactorTime
from twisted.python.failure import Failure
+from twisted.web.http import HTTPChannel
from twisted.web.resource import IResource, Resource
from twisted.web.server import Request, Site
@@ -61,10 +62,18 @@ class SynapseRequest(Request):
logcontext: the log context for this request
"""
- def __init__(self, channel, *args, max_request_body_size: int = 1024, **kw):
- Request.__init__(self, channel, *args, **kw)
+ def __init__(
+ self,
+ channel: HTTPChannel,
+ site: "SynapseSite",
+ *args,
+ max_request_body_size: int = 1024,
+ **kw,
+ ):
+ super().__init__(channel, *args, **kw)
self._max_request_body_size = max_request_body_size
- self.site: SynapseSite = channel.site
+ self.synapse_site = site
+ self.reactor = site.reactor
self._channel = channel # this is used by the tests
self.start_time = 0.0
@@ -97,7 +106,7 @@ class SynapseRequest(Request):
self.get_method(),
self.get_redacted_uri(),
self.clientproto.decode("ascii", errors="replace"),
- self.site.site_tag,
+ self.synapse_site.site_tag,
)
def handleContentChunk(self, data: bytes) -> None:
@@ -216,7 +225,7 @@ class SynapseRequest(Request):
request=ContextRequest(
request_id=request_id,
ip_address=self.getClientIP(),
- site_tag=self.site.site_tag,
+ site_tag=self.synapse_site.site_tag,
# The requester is going to be unknown at this point.
requester=None,
authenticated_entity=None,
@@ -228,7 +237,7 @@ class SynapseRequest(Request):
)
# override the Server header which is set by twisted
- self.setHeader("Server", self.site.server_version_string)
+ self.setHeader("Server", self.synapse_site.server_version_string)
with PreserveLoggingContext(self.logcontext):
# we start the request metrics timer here with an initial stab
@@ -247,7 +256,7 @@ class SynapseRequest(Request):
requests_counter.labels(self.get_method(), self.request_metrics.name).inc()
@contextlib.contextmanager
- def processing(self):
+ def processing(self) -> Generator[None, None, None]:
"""Record the fact that we are processing this request.
Returns a context manager; the correct way to use this is:
@@ -346,10 +355,10 @@ class SynapseRequest(Request):
self.start_time, name=servlet_name, method=self.get_method()
)
- self.site.access_logger.debug(
+ self.synapse_site.access_logger.debug(
"%s - %s - Received request: %s %s",
self.getClientIP(),
- self.site.site_tag,
+ self.synapse_site.site_tag,
self.get_method(),
self.get_redacted_uri(),
)
@@ -388,13 +397,13 @@ class SynapseRequest(Request):
if authenticated_entity:
requester = f"{authenticated_entity}|{requester}"
- self.site.access_logger.log(
+ self.synapse_site.access_logger.log(
log_level,
"%s - %s - {%s}"
" Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
' %sB %s "%s %s %s" "%s" [%d dbevts]',
self.getClientIP(),
- self.site.site_tag,
+ self.synapse_site.site_tag,
requester,
processing_time,
response_send_time,
@@ -522,7 +531,7 @@ class SynapseSite(Site):
site_tag: str,
config: ListenerConfig,
resource: IResource,
- server_version_string,
+ server_version_string: str,
max_request_body_size: int,
reactor: IReactorTime,
):
@@ -542,6 +551,7 @@ class SynapseSite(Site):
Site.__init__(self, resource, reactor=reactor)
self.site_tag = site_tag
+ self.reactor = reactor
assert config.http_options is not None
proxied = config.http_options.x_forwarded
@@ -550,6 +560,7 @@ class SynapseSite(Site):
def request_factory(channel, queued: bool) -> Request:
return request_class(
channel,
+ self,
max_request_body_size=max_request_body_size,
queued=queued,
)
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index c111a9d20f..3923ba8439 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -17,12 +17,11 @@ from typing import TYPE_CHECKING, Dict
from signedjson.sign import sign_json
-from twisted.web.server import Request
-
from synapse.api.errors import Codes, SynapseError
from synapse.crypto.keyring import ServerKeyFetcher
from synapse.http.server import DirectServeJsonResource, respond_with_json
from synapse.http.servlet import parse_integer, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.async_helpers import yieldable_gather_results
@@ -102,7 +101,7 @@ class RemoteKey(DirectServeJsonResource):
)
self.config = hs.config
- async def _async_render_GET(self, request: Request) -> None:
+ async def _async_render_GET(self, request: SynapseRequest) -> None:
assert request.postpath is not None
if len(request.postpath) == 1:
(server,) = request.postpath
@@ -119,7 +118,7 @@ class RemoteKey(DirectServeJsonResource):
await self.query_keys(request, query, query_remote_on_cache_miss=True)
- async def _async_render_POST(self, request: Request) -> None:
+ async def _async_render_POST(self, request: SynapseRequest) -> None:
content = parse_json_object_from_request(request)
query = content["server_keys"]
@@ -128,7 +127,7 @@ class RemoteKey(DirectServeJsonResource):
async def query_keys(
self,
- request: Request,
+ request: SynapseRequest,
query: JsonDict,
query_remote_on_cache_miss: bool = False,
) -> None:
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index 7c881f2bdb..014fa893d6 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -27,6 +27,7 @@ from twisted.web.server import Request
from synapse.api.errors import Codes, SynapseError, cs_error
from synapse.http.server import finish_request, respond_with_json
+from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.util.stringutils import is_ascii
@@ -74,7 +75,7 @@ def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]:
)
-def respond_404(request: Request) -> None:
+def respond_404(request: SynapseRequest) -> None:
respond_with_json(
request,
404,
@@ -84,7 +85,7 @@ def respond_404(request: Request) -> None:
async def respond_with_file(
- request: Request,
+ request: SynapseRequest,
media_type: str,
file_path: str,
file_size: Optional[int] = None,
@@ -221,7 +222,7 @@ def _can_encode_filename_as_token(x: str) -> bool:
async def respond_with_responder(
- request: Request,
+ request: SynapseRequest,
responder: "Optional[Responder]",
media_type: str,
file_size: Optional[int],
diff --git a/synapse/rest/media/v1/config_resource.py b/synapse/rest/media/v1/config_resource.py
index a1d36e5cf1..712d4e8368 100644
--- a/synapse/rest/media/v1/config_resource.py
+++ b/synapse/rest/media/v1/config_resource.py
@@ -16,8 +16,6 @@
from typing import TYPE_CHECKING
-from twisted.web.server import Request
-
from synapse.http.server import DirectServeJsonResource, respond_with_json
from synapse.http.site import SynapseRequest
@@ -39,5 +37,5 @@ class MediaConfigResource(DirectServeJsonResource):
await self.auth.get_user_by_req(request)
respond_with_json(request, 200, self.limits_dict, send_cors=True)
- async def _async_render_OPTIONS(self, request: Request) -> None:
+ async def _async_render_OPTIONS(self, request: SynapseRequest) -> None:
respond_with_json(request, 200, {}, send_cors=True)
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py
index d6d938953e..6180fa575e 100644
--- a/synapse/rest/media/v1/download_resource.py
+++ b/synapse/rest/media/v1/download_resource.py
@@ -15,10 +15,9 @@
import logging
from typing import TYPE_CHECKING
-from twisted.web.server import Request
-
from synapse.http.server import DirectServeJsonResource, set_cors_headers
from synapse.http.servlet import parse_boolean
+from synapse.http.site import SynapseRequest
from ._base import parse_media_id, respond_404
@@ -37,7 +36,7 @@ class DownloadResource(DirectServeJsonResource):
self.media_repo = media_repo
self.server_name = hs.hostname
- async def _async_render_GET(self, request: Request) -> None:
+ async def _async_render_GET(self, request: SynapseRequest) -> None:
set_cors_headers(request)
request.setHeader(
b"Content-Security-Policy",
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index a30007a1e2..c1bd81100d 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -23,7 +23,6 @@ import twisted.internet.error
import twisted.web.http
from twisted.internet.defer import Deferred
from twisted.web.resource import Resource
-from twisted.web.server import Request
from synapse.api.errors import (
FederationDeniedError,
@@ -34,6 +33,7 @@ from synapse.api.errors import (
)
from synapse.config._base import ConfigError
from synapse.config.repository import ThumbnailRequirement
+from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import UserID
@@ -189,7 +189,7 @@ class MediaRepository:
return "mxc://%s/%s" % (self.server_name, media_id)
async def get_local_media(
- self, request: Request, media_id: str, name: Optional[str]
+ self, request: SynapseRequest, media_id: str, name: Optional[str]
) -> None:
"""Responds to requests for local media, if exists, or returns 404.
@@ -223,7 +223,11 @@ class MediaRepository:
)
async def get_remote_media(
- self, request: Request, server_name: str, media_id: str, name: Optional[str]
+ self,
+ request: SynapseRequest,
+ server_name: str,
+ media_id: str,
+ name: Optional[str],
) -> None:
"""Respond to requests for remote media.
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 9ffa983fbb..128706d297 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -29,7 +29,6 @@ import attr
from twisted.internet.defer import Deferred
from twisted.internet.error import DNSLookupError
-from twisted.web.server import Request
from synapse.api.errors import Codes, SynapseError
from synapse.http.client import SimpleHttpClient
@@ -168,7 +167,7 @@ class PreviewUrlResource(DirectServeJsonResource):
self._start_expire_url_cache_data, 10 * 1000
)
- async def _async_render_OPTIONS(self, request: Request) -> None:
+ async def _async_render_OPTIONS(self, request: SynapseRequest) -> None:
request.setHeader(b"Allow", b"OPTIONS, GET")
respond_with_json(request, 200, {}, send_cors=True)
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index 22f43d8531..cb2f88676e 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -17,11 +17,10 @@
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
-from twisted.web.server import Request
-
from synapse.api.errors import SynapseError
from synapse.http.server import DirectServeJsonResource, set_cors_headers
from synapse.http.servlet import parse_integer, parse_string
+from synapse.http.site import SynapseRequest
from synapse.rest.media.v1.media_storage import MediaStorage
from ._base import (
@@ -57,7 +56,7 @@ class ThumbnailResource(DirectServeJsonResource):
self.dynamic_thumbnails = hs.config.dynamic_thumbnails
self.server_name = hs.hostname
- async def _async_render_GET(self, request: Request) -> None:
+ async def _async_render_GET(self, request: SynapseRequest) -> None:
set_cors_headers(request)
server_name, media_id, _ = parse_media_id(request)
width = parse_integer(request, "width", required=True)
@@ -88,7 +87,7 @@ class ThumbnailResource(DirectServeJsonResource):
async def _respond_local_thumbnail(
self,
- request: Request,
+ request: SynapseRequest,
media_id: str,
width: int,
height: int,
@@ -121,7 +120,7 @@ class ThumbnailResource(DirectServeJsonResource):
async def _select_or_generate_local_thumbnail(
self,
- request: Request,
+ request: SynapseRequest,
media_id: str,
desired_width: int,
desired_height: int,
@@ -186,7 +185,7 @@ class ThumbnailResource(DirectServeJsonResource):
async def _select_or_generate_remote_thumbnail(
self,
- request: Request,
+ request: SynapseRequest,
server_name: str,
media_id: str,
desired_width: int,
@@ -249,7 +248,7 @@ class ThumbnailResource(DirectServeJsonResource):
async def _respond_remote_thumbnail(
self,
- request: Request,
+ request: SynapseRequest,
server_name: str,
media_id: str,
width: int,
@@ -280,7 +279,7 @@ class ThumbnailResource(DirectServeJsonResource):
async def _select_and_respond_with_thumbnail(
self,
- request: Request,
+ request: SynapseRequest,
desired_width: int,
desired_height: int,
desired_method: str,
diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py
index 146adca8f1..39b29318bb 100644
--- a/synapse/rest/media/v1/upload_resource.py
+++ b/synapse/rest/media/v1/upload_resource.py
@@ -16,8 +16,6 @@
import logging
from typing import IO, TYPE_CHECKING, Dict, List, Optional
-from twisted.web.server import Request
-
from synapse.api.errors import Codes, SynapseError
from synapse.http.server import DirectServeJsonResource, respond_with_json
from synapse.http.servlet import parse_bytes_from_args
@@ -46,7 +44,7 @@ class UploadResource(DirectServeJsonResource):
self.max_upload_size = hs.config.max_upload_size
self.clock = hs.get_clock()
- async def _async_render_OPTIONS(self, request: Request) -> None:
+ async def _async_render_OPTIONS(self, request: SynapseRequest) -> None:
respond_with_json(request, 200, {}, send_cors=True)
async def _async_render_POST(self, request: SynapseRequest) -> None:
diff --git a/tests/http/test_additional_resource.py b/tests/http/test_additional_resource.py
index 768c2ba4ea..391196425c 100644
--- a/tests/http/test_additional_resource.py
+++ b/tests/http/test_additional_resource.py
@@ -45,7 +45,9 @@ class AdditionalResourceTests(HomeserverTestCase):
handler = _AsyncTestCustomEndpoint({}, None).handle_request
resource = AdditionalResource(self.hs, handler)
- channel = make_request(self.reactor, FakeSite(resource), "GET", "/")
+ channel = make_request(
+ self.reactor, FakeSite(resource, self.reactor), "GET", "/"
+ )
self.assertEqual(channel.code, 200)
self.assertEqual(channel.json_body, {"some_key": "some_value_async"})
@@ -54,7 +56,9 @@ class AdditionalResourceTests(HomeserverTestCase):
handler = _SyncTestCustomEndpoint({}, None).handle_request
resource = AdditionalResource(self.hs, handler)
- channel = make_request(self.reactor, FakeSite(resource), "GET", "/")
+ channel = make_request(
+ self.reactor, FakeSite(resource, self.reactor), "GET", "/"
+ )
self.assertEqual(channel.code, 200)
self.assertEqual(channel.json_body, {"some_key": "some_value_sync"})
diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py
index 1160716929..f73fcd684e 100644
--- a/tests/logging/test_terse_json.py
+++ b/tests/logging/test_terse_json.py
@@ -152,7 +152,8 @@ class TerseJsonTestCase(LoggerCleanupMixin, TestCase):
site = Mock(spec=["site_tag", "server_version_string", "getResourceFor"])
site.site_tag = "test-site"
site.server_version_string = "Server v1"
- request = SynapseRequest(FakeChannel(site, None))
+ site.reactor = Mock()
+ request = SynapseRequest(FakeChannel(site, None), site)
# Call requestReceived to finish instantiating the object.
request.content = BytesIO()
# Partially skip some of the internal processing of SynapseRequest.
diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py
index 01b1b0d4a0..13aa5eb51a 100644
--- a/tests/replication/test_multi_media_repo.py
+++ b/tests/replication/test_multi_media_repo.py
@@ -68,7 +68,7 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase):
resource = hs.get_media_repository_resource().children[b"download"]
channel = make_request(
self.reactor,
- FakeSite(resource),
+ FakeSite(resource, self.reactor),
"GET",
f"/{target}/{media_id}",
shorthand=False,
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index febd40b656..192073c520 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -201,7 +201,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
"""Ensure a piece of media is quarantined when trying to access it."""
channel = make_request(
self.reactor,
- FakeSite(self.download_resource),
+ FakeSite(self.download_resource, self.reactor),
"GET",
server_and_media_id,
shorthand=False,
@@ -271,7 +271,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
# Attempt to access the media
channel = make_request(
self.reactor,
- FakeSite(self.download_resource),
+ FakeSite(self.download_resource, self.reactor),
"GET",
server_name_and_media_id,
shorthand=False,
@@ -458,7 +458,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
# Attempt to access each piece of media
channel = make_request(
self.reactor,
- FakeSite(self.download_resource),
+ FakeSite(self.download_resource, self.reactor),
"GET",
server_and_media_id_2,
shorthand=False,
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index 2f02934e72..f813866073 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -125,7 +125,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
# Attempt to access media
channel = make_request(
self.reactor,
- FakeSite(download_resource),
+ FakeSite(download_resource, self.reactor),
"GET",
server_and_media_id,
shorthand=False,
@@ -164,7 +164,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
# Attempt to access media
channel = make_request(
self.reactor,
- FakeSite(download_resource),
+ FakeSite(download_resource, self.reactor),
"GET",
server_and_media_id,
shorthand=False,
@@ -525,7 +525,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
channel = make_request(
self.reactor,
- FakeSite(download_resource),
+ FakeSite(download_resource, self.reactor),
"GET",
server_and_media_id,
shorthand=False,
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index cc3f16c62a..e79e0e1850 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -2973,7 +2973,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
# Try to access a media and to create `last_access_ts`
channel = make_request(
self.reactor,
- FakeSite(download_resource),
+ FakeSite(download_resource, self.reactor),
"GET",
server_and_media_id,
shorthand=False,
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index b946fca8b3..9e9e953cf4 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -312,7 +312,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):
# Load the password reset confirmation page
channel = make_request(
self.reactor,
- FakeSite(self.submit_token_resource),
+ FakeSite(self.submit_token_resource, self.reactor),
"GET",
path,
shorthand=False,
@@ -326,7 +326,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):
# Confirm the password reset
channel = make_request(
self.reactor,
- FakeSite(self.submit_token_resource),
+ FakeSite(self.submit_token_resource, self.reactor),
"POST",
path,
content=b"",
diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py
index 65c58ce70a..84d092ca82 100644
--- a/tests/rest/client/test_consent.py
+++ b/tests/rest/client/test_consent.py
@@ -61,7 +61,11 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase):
"""You can observe the terms form without specifying a user"""
resource = consent_resource.ConsentResource(self.hs)
channel = make_request(
- self.reactor, FakeSite(resource), "GET", "/consent?v=1", shorthand=False
+ self.reactor,
+ FakeSite(resource, self.reactor),
+ "GET",
+ "/consent?v=1",
+ shorthand=False,
)
self.assertEqual(channel.code, 200)
@@ -83,7 +87,7 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase):
)
channel = make_request(
self.reactor,
- FakeSite(resource),
+ FakeSite(resource, self.reactor),
"GET",
consent_uri,
access_token=access_token,
@@ -98,7 +102,7 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase):
# POST to the consent page, saying we've agreed
channel = make_request(
self.reactor,
- FakeSite(resource),
+ FakeSite(resource, self.reactor),
"POST",
consent_uri + "&v=" + version,
access_token=access_token,
@@ -110,7 +114,7 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase):
# changed
channel = make_request(
self.reactor,
- FakeSite(resource),
+ FakeSite(resource, self.reactor),
"GET",
consent_uri,
access_token=access_token,
diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
index c56e45fc10..3075d3f288 100644
--- a/tests/rest/client/utils.py
+++ b/tests/rest/client/utils.py
@@ -383,7 +383,7 @@ class RestHelper:
path = "/_matrix/media/r0/upload?filename=%s" % (filename,)
channel = make_request(
self.hs.get_reactor(),
- FakeSite(resource),
+ FakeSite(resource, self.hs.get_reactor()),
"POST",
path,
content=image_data,
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
index a75c0ea3f0..4672a68596 100644
--- a/tests/rest/key/v2/test_remote_key_resource.py
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -84,7 +84,7 @@ class RemoteKeyResourceTestCase(BaseRemoteKeyResourceTestCase):
Checks that the response is a 200 and returns the decoded json body.
"""
channel = FakeChannel(self.site, self.reactor)
- req = SynapseRequest(channel)
+ req = SynapseRequest(channel, self.site)
req.content = BytesIO(b"")
req.requestReceived(
b"GET",
@@ -183,7 +183,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
)
channel = FakeChannel(self.site, self.reactor)
- req = SynapseRequest(channel)
+ req = SynapseRequest(channel, self.site)
req.content = BytesIO(encode_canonical_json(data))
req.requestReceived(
diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py
index 9ea1c2bf25..44a643d506 100644
--- a/tests/rest/media/v1/test_media_storage.py
+++ b/tests/rest/media/v1/test_media_storage.py
@@ -252,7 +252,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
channel = make_request(
self.reactor,
- FakeSite(self.download_resource),
+ FakeSite(self.download_resource, self.reactor),
"GET",
self.media_id,
shorthand=False,
@@ -384,7 +384,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
params = "?width=32&height=32&method=scale"
channel = make_request(
self.reactor,
- FakeSite(self.thumbnail_resource),
+ FakeSite(self.thumbnail_resource, self.reactor),
"GET",
self.media_id + params,
shorthand=False,
@@ -413,7 +413,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
channel = make_request(
self.reactor,
- FakeSite(self.thumbnail_resource),
+ FakeSite(self.thumbnail_resource, self.reactor),
"GET",
self.media_id + params,
shorthand=False,
@@ -433,7 +433,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
params = "?width=32&height=32&method=" + method
channel = make_request(
self.reactor,
- FakeSite(self.thumbnail_resource),
+ FakeSite(self.thumbnail_resource, self.reactor),
"GET",
self.media_id + params,
shorthand=False,
diff --git a/tests/server.py b/tests/server.py
index b861c7b866..88dfa8058e 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -19,6 +19,7 @@ from twisted.internet.interfaces import (
IPullProducer,
IPushProducer,
IReactorPluggableNameResolver,
+ IReactorTime,
IResolverSimple,
ITransport,
)
@@ -181,13 +182,14 @@ class FakeSite:
site_tag = "test"
access_logger = logging.getLogger("synapse.access.http.fake")
- def __init__(self, resource: IResource):
+ def __init__(self, resource: IResource, reactor: IReactorTime):
"""
Args:
resource: the resource to be used for rendering all requests
"""
self._resource = resource
+ self.reactor = reactor
def getResourceFor(self, request):
return self._resource
@@ -268,7 +270,7 @@ def make_request(
channel = FakeChannel(site, reactor, ip=client_ip)
- req = request(channel)
+ req = request(channel, site)
req.content = BytesIO(content)
# Twisted expects to be at the end of the content when parsing the request.
req.content.seek(SEEK_END)
diff --git a/tests/test_server.py b/tests/test_server.py
index 407e172e41..f2ffbc895b 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -65,7 +65,10 @@ class JsonResourceTests(unittest.TestCase):
)
make_request(
- self.reactor, FakeSite(res), b"GET", b"/_matrix/foo/%E2%98%83?a=%E2%98%83"
+ self.reactor,
+ FakeSite(res, self.reactor),
+ b"GET",
+ b"/_matrix/foo/%E2%98%83?a=%E2%98%83",
)
self.assertEqual(got_kwargs, {"room_id": "\N{SNOWMAN}"})
@@ -84,7 +87,9 @@ class JsonResourceTests(unittest.TestCase):
"GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet"
)
- channel = make_request(self.reactor, FakeSite(res), b"GET", b"/_matrix/foo")
+ channel = make_request(
+ self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foo"
+ )
self.assertEqual(channel.result["code"], b"500")
@@ -100,7 +105,7 @@ class JsonResourceTests(unittest.TestCase):
def _callback(request, **kwargs):
d = Deferred()
d.addCallback(_throw)
- self.reactor.callLater(1, d.callback, True)
+ self.reactor.callLater(0.5, d.callback, True)
return make_deferred_yieldable(d)
res = JsonResource(self.homeserver)
@@ -108,7 +113,9 @@ class JsonResourceTests(unittest.TestCase):
"GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet"
)
- channel = make_request(self.reactor, FakeSite(res), b"GET", b"/_matrix/foo")
+ channel = make_request(
+ self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foo"
+ )
self.assertEqual(channel.result["code"], b"500")
@@ -126,7 +133,9 @@ class JsonResourceTests(unittest.TestCase):
"GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet"
)
- channel = make_request(self.reactor, FakeSite(res), b"GET", b"/_matrix/foo")
+ channel = make_request(
+ self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foo"
+ )
self.assertEqual(channel.result["code"], b"403")
self.assertEqual(channel.json_body["error"], "Forbidden!!one!")
@@ -148,7 +157,9 @@ class JsonResourceTests(unittest.TestCase):
"GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet"
)
- channel = make_request(self.reactor, FakeSite(res), b"GET", b"/_matrix/foobar")
+ channel = make_request(
+ self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foobar"
+ )
self.assertEqual(channel.result["code"], b"400")
self.assertEqual(channel.json_body["error"], "Unrecognized request")
@@ -173,7 +184,9 @@ class JsonResourceTests(unittest.TestCase):
)
# The path was registered as GET, but this is a HEAD request.
- channel = make_request(self.reactor, FakeSite(res), b"HEAD", b"/_matrix/foo")
+ channel = make_request(
+ self.reactor, FakeSite(res, self.reactor), b"HEAD", b"/_matrix/foo"
+ )
self.assertEqual(channel.result["code"], b"200")
self.assertNotIn("body", channel.result)
@@ -280,7 +293,9 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
res = WrapHtmlRequestHandlerTests.TestResource()
res.callback = callback
- channel = make_request(self.reactor, FakeSite(res), b"GET", b"/path")
+ channel = make_request(
+ self.reactor, FakeSite(res, self.reactor), b"GET", b"/path"
+ )
self.assertEqual(channel.result["code"], b"200")
body = channel.result["body"]
@@ -298,7 +313,9 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
res = WrapHtmlRequestHandlerTests.TestResource()
res.callback = callback
- channel = make_request(self.reactor, FakeSite(res), b"GET", b"/path")
+ channel = make_request(
+ self.reactor, FakeSite(res, self.reactor), b"GET", b"/path"
+ )
self.assertEqual(channel.result["code"], b"301")
headers = channel.result["headers"]
@@ -319,7 +336,9 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
res = WrapHtmlRequestHandlerTests.TestResource()
res.callback = callback
- channel = make_request(self.reactor, FakeSite(res), b"GET", b"/path")
+ channel = make_request(
+ self.reactor, FakeSite(res, self.reactor), b"GET", b"/path"
+ )
self.assertEqual(channel.result["code"], b"304")
headers = channel.result["headers"]
@@ -338,7 +357,9 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
res = WrapHtmlRequestHandlerTests.TestResource()
res.callback = callback
- channel = make_request(self.reactor, FakeSite(res), b"HEAD", b"/path")
+ channel = make_request(
+ self.reactor, FakeSite(res, self.reactor), b"HEAD", b"/path"
+ )
self.assertEqual(channel.result["code"], b"200")
self.assertNotIn("body", channel.result)
--
cgit 1.5.1
From 261c9763c472f0ea1ceac9729dfc3a5da2799300 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 24 Sep 2021 11:56:13 +0100
Subject: Simplify `_auth_and_persist_fetched_events` (#10901)
Combine the two loops over the list of events, and hence get rid of
`_NewEventInfo`. Also pass the event back alongside the context, so that it's
easier to process the result.
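To make the new shape concrete, here is a minimal editorial sketch (not code from
this patch): Event, Context and check_event_auth are hypothetical stand-ins, and
asyncio.gather stands in for Synapse's yieldable_gather_results. The point is that
a single prep() per event does all of the per-event work and returns the event
together with its context, so the caller no longer zips two parallel lists:

    # Editorial sketch, not from this patch: asyncio.gather stands in for
    # yieldable_gather_results; Event, Context and check_event_auth are
    # hypothetical stand-ins for the real Synapse types and helpers.
    import asyncio
    from typing import List, Tuple

    class Event:
        def __init__(self, event_id: str) -> None:
            self.event_id = event_id

    class Context:
        def __init__(self, event_id: str) -> None:
            self.event_id = event_id

    async def check_event_auth(event: Event) -> Context:
        # stands in for _check_event_auth: builds the context for one event
        return Context(event.event_id)

    async def persist_fetched_events(
        events: List[Event],
    ) -> List[Tuple[Event, Context]]:
        # One prep() per event does all of the per-event work and hands the
        # event back alongside its context, so the caller no longer needs to
        # zip two parallel lists together.
        async def prep(event: Event) -> Tuple[Event, Context]:
            context = await check_event_auth(event)
            return event, context

        return list(await asyncio.gather(*(prep(e) for e in events)))

    if __name__ == "__main__":
        pairs = asyncio.run(persist_fetched_events([Event("$a"), Event("$b")]))
        for event, context in pairs:
            print(event.event_id, "->", context.event_id)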
---
changelog.d/10901.misc | 1 +
synapse/handlers/federation_event.py | 91 +++++++++---------------------------
2 files changed, 23 insertions(+), 69 deletions(-)
create mode 100644 changelog.d/10901.misc
diff --git a/changelog.d/10901.misc b/changelog.d/10901.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/10901.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 7d468bd2df..4eefcc36d8 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -27,11 +27,8 @@ from typing import (
Tuple,
)
-import attr
from prometheus_client import Counter
-from twisted.internet import defer
-
from synapse import event_auth
from synapse.api.constants import (
EventContentFields,
@@ -54,11 +51,7 @@ from synapse.event_auth import auth_types_for_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_client import InvalidResponseError
-from synapse.logging.context import (
- make_deferred_yieldable,
- nested_logging_context,
- run_in_background,
-)
+from synapse.logging.context import nested_logging_context, run_in_background
from synapse.logging.utils import log_function
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
@@ -75,7 +68,11 @@ from synapse.types import (
UserID,
get_domain_from_id,
)
-from synapse.util.async_helpers import Linearizer, concurrently_execute
+from synapse.util.async_helpers import (
+ Linearizer,
+ concurrently_execute,
+ yieldable_gather_results,
+)
from synapse.util.iterutils import batch_iter
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import shortstr
@@ -92,30 +89,6 @@ soft_failed_event_counter = Counter(
)
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class _NewEventInfo:
- """Holds information about a received event, ready for passing to _auth_and_persist_events
-
- Attributes:
- event: the received event
-
- claimed_auth_event_map: a map of (type, state_key) => event for the event's
- claimed auth_events.
-
- This can include events which have not yet been persisted, in the case that
- we are backfilling a batch of events.
-
- Note: May be incomplete: if we were unable to find all of the claimed auth
- events. Also, treat the contents with caution: the events might also have
- been rejected, might not yet have been authorized themselves, or they might
- be in the wrong room.
-
- """
-
- event: EventBase
- claimed_auth_event_map: StateMap[EventBase]
-
-
class FederationEventHandler:
"""Handles events that originated from federation.
@@ -1203,47 +1176,27 @@ class FederationEventHandler:
allow_rejected=True,
)
- event_infos = []
- for event in fetched_events:
- auth = {}
- for auth_event_id in event.auth_event_ids():
- ae = persisted_events.get(auth_event_id)
- if ae:
- auth[(ae.type, ae.state_key)] = ae
- else:
- logger.info("Missing auth event %s", auth_event_id)
-
- event_infos.append(_NewEventInfo(event, auth))
-
- if not event_infos:
- return
-
- async def prep(ev_info: _NewEventInfo) -> EventContext:
- event = ev_info.event
+ async def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]:
with nested_logging_context(suffix=event.event_id):
- res = EventContext.for_outlier()
- res = await self._check_event_auth(
+ auth = {}
+ for auth_event_id in event.auth_event_ids():
+ ae = persisted_events.get(auth_event_id)
+ if ae:
+ auth[(ae.type, ae.state_key)] = ae
+ else:
+ logger.info("Missing auth event %s", auth_event_id)
+
+ context = EventContext.for_outlier()
+ context = await self._check_event_auth(
origin,
event,
- res,
- claimed_auth_event_map=ev_info.claimed_auth_event_map,
+ context,
+ claimed_auth_event_map=auth,
)
- return res
-
- contexts = await make_deferred_yieldable(
- defer.gatherResults(
- [run_in_background(prep, ev_info) for ev_info in event_infos],
- consumeErrors=True,
- )
- )
+ return event, context
- await self.persist_events_and_notify(
- room_id,
- [
- (ev_info.event, context)
- for ev_info, context in zip(event_infos, contexts)
- ],
- )
+ events_to_persist = await yieldable_gather_results(prep, fetched_events)
+ await self.persist_events_and_notify(room_id, events_to_persist)
async def _check_event_auth(
self,
--
cgit 1.5.1
From 85551b7a8555eb4e4456d5cf2db0fecd4a44621c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 24 Sep 2021 11:56:33 +0100
Subject: Factor out common code for persisting fetched auth events (#10896)
* Factor more stuff out of `_get_events_and_persist`
It turns out that the event-sorting algorithm in `_get_events_and_persist` is
also useful in other circumstances. Here we move the current
`_auth_and_persist_fetched_events` to `_auth_and_persist_fetched_events_inner`,
and then factor the sorting part out to `_auth_and_persist_fetched_events`.
* `_get_remote_auth_chain_for_event`: remove redundant `outlier` assignment
`get_event_auth` returns events with the outlier flag already set, so this is
redundant (though we need to update a test where `get_event_auth` is mocked).
* `_get_remote_auth_chain_for_event`: move existing-event tests earlier
Move a couple of tests outside the loop. This is a bit inefficient for now, but
a future commit will make it better. It should be functionally identical.
* `_get_remote_auth_chain_for_event`: use `_auth_and_persist_fetched_events`
We can use the same codepath for persisting the events fetched as part of an
auth chain as for those fetched individually by `_get_events_and_persist` for
building the state at a backwards extremity.
* `_get_remote_auth_chain_for_event`: use a dict for efficiency
`_auth_and_persist_fetched_events` sorts the events itself, so we no longer
need to care about maintaining the ordering from `get_event_auth` (and no
longer need to sort by depth in `get_event_auth`).
That means that we can use a map, making it easier to filter out events we
already have, etc.
* changelog
* `_auth_and_persist_fetched_events`: improve docstring
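As an editorial aside, the ordering that the new `_auth_and_persist_fetched_events`
applies can be sketched as repeatedly peeling off the "roots": events none of whose
auth events are still waiting in the map. This is a simplified stand-alone
illustration under that assumption, not the patch's code; Event here is a
hypothetical stand-in:

    # Editorial sketch, not from this patch: Event is a hypothetical stand-in.
    from typing import Dict, List

    class Event:
        def __init__(self, event_id: str, auth_ids: List[str]) -> None:
            self.event_id = event_id
            self._auth_ids = auth_ids

        def auth_event_ids(self) -> List[str]:
            return self._auth_ids

    def batches_in_auth_order(events: List[Event]) -> List[List[Event]]:
        event_map: Dict[str, Event] = {e.event_id: e for e in events}
        batches: List[List[Event]] = []
        while event_map:
            # A "root" has no auth events still waiting in the map, so it
            # is safe to auth and persist it now.
            roots = [
                e
                for e in event_map.values()
                if not any(a in event_map for a in e.auth_event_ids())
            ]
            if not roots:
                # A cycle in the claimed auth events; bail out rather than
                # loop forever (the sketch keeps this deliberately simple).
                raise ValueError("cycle in auth events")
            batches.append(roots)
            for e in roots:
                del event_map[e.event_id]
        return batches

    if __name__ == "__main__":
        create = Event("$create", [])
        member = Event("$member", ["$create"])
        message = Event("$message", ["$create", "$member"])
        for batch in batches_in_auth_order([message, create, member]):
            print([e.event_id for e in batch])
        # prints ['$create'], then ['$member'], then ['$message']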
---
changelog.d/10896.misc | 1 +
synapse/federation/federation_client.py | 2 -
synapse/handlers/federation_event.py | 103 +++++++++++++++-----------------
tests/handlers/test_federation.py | 7 ++-
4 files changed, 55 insertions(+), 58 deletions(-)
create mode 100644 changelog.d/10896.misc
diff --git a/changelog.d/10896.misc b/changelog.d/10896.misc
new file mode 100644
index 0000000000..41de995842
--- /dev/null
+++ b/changelog.d/10896.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 1416abd0fb..584836c04a 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -501,8 +501,6 @@ class FederationClient(FederationBase):
destination, auth_chain, outlier=True, room_version=room_version
)
- signed_auth.sort(key=lambda e: e.depth)
-
return signed_auth
def _is_unknown_endpoint(
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 4eefcc36d8..8fd9e51044 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -1080,7 +1080,7 @@ class FederationEventHandler:
room_version = await self._store.get_room_version(room_id)
- event_map: Dict[str, EventBase] = {}
+ events: List[EventBase] = []
async def get_event(event_id: str) -> None:
with nested_logging_context(event_id):
@@ -1098,8 +1098,7 @@ class FederationEventHandler:
event_id,
)
return
-
- event_map[event.event_id] = event
+ events.append(event)
except Exception as e:
logger.warning(
@@ -1110,11 +1109,29 @@ class FederationEventHandler:
)
await concurrently_execute(get_event, event_ids, 5)
- logger.info("Fetched %i events of %i requested", len(event_map), len(event_ids))
+ logger.info("Fetched %i events of %i requested", len(events), len(event_ids))
+ await self._auth_and_persist_fetched_events(destination, room_id, events)
+
+ async def _auth_and_persist_fetched_events(
+ self, origin: str, room_id: str, events: Iterable[EventBase]
+ ) -> None:
+ """Persist the events fetched by _get_events_and_persist or _get_remote_auth_chain_for_event
+
+ The events to be persisted must be outliers.
+
+ We first sort the events to make sure that we process each event's auth_events
+ before the event itself, and then auth and persist them.
+
+ Notifies about the events where appropriate.
+
+ Params:
+ origin: where the events came from
+ room_id: the room that the events are meant to be in (though this has
+ not yet been checked)
+ events: the events that have been fetched
+ """
+ event_map = {event.event_id: event for event in events}
- # we now need to auth the events in an order which ensures that each event's
- # auth_events are authed before the event itself.
- #
# XXX: it might be possible to kick this process off in parallel with fetching
# the events.
while event_map:
@@ -1141,22 +1158,18 @@ class FederationEventHandler:
"Persisting %i of %i remaining events", len(roots), len(event_map)
)
- await self._auth_and_persist_fetched_events(destination, room_id, roots)
+ await self._auth_and_persist_fetched_events_inner(origin, room_id, roots)
for ev in roots:
del event_map[ev.event_id]
- async def _auth_and_persist_fetched_events(
+ async def _auth_and_persist_fetched_events_inner(
self, origin: str, room_id: str, fetched_events: Collection[EventBase]
) -> None:
- """Persist the events fetched by _get_events_and_persist.
+ """Helper for _auth_and_persist_fetched_events
- The events should not depend on one another, e.g. this should be used to persist
- a bunch of outliers, but not a chunk of individual events that depend
- on each other for state calculations.
-
- We also assume that all of the auth events for all of the events have already
- been persisted.
+ Persists a batch of events where we have (theoretically) already persisted all
+ of their auth events.
Notifies about the events where appropriate.
@@ -1164,7 +1177,7 @@ class FederationEventHandler:
origin: where the events came from
room_id: the room that the events are meant to be in (though this has
not yet been checked)
- event_id: map from event_id -> event for the fetched events
+ fetched_events: the events to persist
"""
# get all the auth events for all the events in this batch. By now, they should
# have been persisted.
@@ -1558,53 +1571,33 @@ class FederationEventHandler:
event_id: the event for which we are lacking auth events
"""
try:
- remote_auth_chain = await self._federation_client.get_event_auth(
- destination, room_id, event_id
- )
+ remote_event_map = {
+ e.event_id: e
+ for e in await self._federation_client.get_event_auth(
+ destination, room_id, event_id
+ )
+ }
except RequestSendFailed as e1:
# The other side isn't around or doesn't implement the
# endpoint, so lets just bail out.
logger.info("Failed to get event auth from remote: %s", e1)
return
- seen_remotes = await self._store.have_seen_events(
- room_id, [e.event_id for e in remote_auth_chain]
- )
+ logger.info("/event_auth returned %i events", len(remote_event_map))
- for auth_event in remote_auth_chain:
- if auth_event.event_id in seen_remotes:
- continue
+ # `event` may be returned, but we should not yet process it.
+ remote_event_map.pop(event_id, None)
- if auth_event.event_id == event_id:
- continue
+ # nor should we reprocess any events we have already seen.
+ seen_remotes = await self._store.have_seen_events(
+ room_id, remote_event_map.keys()
+ )
+ for s in seen_remotes:
+ remote_event_map.pop(s, None)
- try:
- auth_ids = auth_event.auth_event_ids()
- auth = {
- (e.type, e.state_key): e
- for e in remote_auth_chain
- if e.event_id in auth_ids or e.type == EventTypes.Create
- }
- auth_event.internal_metadata.outlier = True
-
- logger.debug(
- "_check_event_auth %s missing_auth: %s",
- event_id,
- auth_event.event_id,
- )
- missing_auth_event_context = EventContext.for_outlier()
- missing_auth_event_context = await self._check_event_auth(
- destination,
- auth_event,
- missing_auth_event_context,
- claimed_auth_event_map=auth,
- )
- await self.persist_events_and_notify(
- room_id,
- [(auth_event, missing_auth_event_context)],
- )
- except AuthError:
- pass
+ await self._auth_and_persist_fetched_events(
+ destination, room_id, remote_event_map.values()
+ )
async def _update_context_for_auth_events(
self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index 6c67a16de9..936ebf3dde 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -308,7 +308,12 @@ class FederationTestCase(unittest.HomeserverTestCase):
async def get_event_auth(
destination: str, room_id: str, event_id: str
) -> List[EventBase]:
- return auth_events
+ return [
+ event_from_pdu_json(
+ ae.get_pdu_json(), room_version=room_version, outlier=True
+ )
+ for ae in auth_events
+ ]
self.handler.federation_client.get_event_auth = get_event_auth
--
cgit 1.5.1
From bb7fdd821b07016a43bdbb245eda5b35356863c0 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 24 Sep 2021 07:25:21 -0400
Subject: Use direct references for configuration variables (part 5). (#10897)
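As an editorial illustration of the pattern this series applies, the sketch below
contrasts flattened config attributes with direct references into per-section
config objects. The classes and default values are hypothetical stand-ins, not
Synapse's real config tree:

    # Editorial sketch, not from this patch: hypothetical minimal config
    # classes standing in for Synapse's real config tree.
    from dataclasses import dataclass

    @dataclass
    class WorkerConfig:
        worker_pid_file: str = "/var/run/worker.pid"
        worker_daemonize: bool = False

    @dataclass
    class MediaConfig:
        max_upload_size: int = 50 * 1024 * 1024

    @dataclass
    class RootConfig:
        worker: WorkerConfig
        media: MediaConfig

    config = RootConfig(worker=WorkerConfig(), media=MediaConfig())

    # Before: flattened attributes such as config.worker_pid_file hid which
    # section owned a flag. After: each reference names its section, so the
    # owning config class is obvious at the call site.
    print(config.worker.worker_pid_file)
    print(config.media.max_upload_size)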
---
changelog.d/10897.misc | 1 +
synapse/app/_base.py | 4 ++--
synapse/app/admin_cmd.py | 6 +++---
synapse/app/generic_worker.py | 6 +++---
synapse/app/homeserver.py | 2 +-
synapse/config/logger.py | 4 +++-
synapse/crypto/context_factory.py | 4 ++--
synapse/events/spamcheck.py | 2 +-
synapse/events/third_party_rules.py | 4 ++--
synapse/handlers/auth.py | 10 ++++++----
synapse/handlers/directory.py | 6 +++---
synapse/handlers/federation.py | 2 +-
synapse/handlers/message.py | 8 ++++----
synapse/handlers/register.py | 2 +-
synapse/handlers/room.py | 8 +++++---
synapse/handlers/room_list.py | 2 +-
synapse/handlers/room_member.py | 2 +-
synapse/handlers/saml.py | 15 +++++++-------
synapse/handlers/sso.py | 10 ++++++----
synapse/handlers/stats.py | 2 +-
synapse/handlers/user_directory.py | 2 +-
synapse/logging/opentracing.py | 6 +++---
synapse/replication/http/_base.py | 4 ++--
synapse/replication/tcp/handler.py | 4 ++--
synapse/rest/admin/__init__.py | 2 +-
synapse/rest/client/login.py | 2 +-
synapse/rest/client/user_directory.py | 2 +-
synapse/rest/client/versions.py | 6 +++---
synapse/rest/client/voip.py | 12 +++++------
synapse/rest/media/v1/config_resource.py | 2 +-
synapse/rest/media/v1/media_repository.py | 20 +++++++++++--------
synapse/rest/media/v1/preview_url_resource.py | 10 +++++-----
synapse/rest/media/v1/storage_provider.py | 2 +-
synapse/rest/media/v1/thumbnail_resource.py | 2 +-
synapse/rest/media/v1/upload_resource.py | 2 +-
synapse/rest/synapse/client/__init__.py | 2 +-
.../rest/synapse/client/saml2/metadata_resource.py | 2 +-
synapse/server_notices/server_notices_manager.py | 23 +++++++++++-----------
synapse/storage/databases/main/registration.py | 2 +-
synapse/storage/databases/main/stats.py | 2 +-
synapse/storage/databases/main/user_directory.py | 4 ++--
tests/handlers/test_directory.py | 4 +++-
tests/handlers/test_stats.py | 8 ++++----
tests/handlers/test_user_directory.py | 6 +++---
tests/rest/admin/test_media.py | 4 ++--
tests/rest/admin/test_user.py | 2 +-
tests/rest/media/v1/test_media_storage.py | 2 +-
.../test_resource_limits_server_notices.py | 2 +-
48 files changed, 128 insertions(+), 113 deletions(-)
create mode 100644 changelog.d/10897.misc
diff --git a/changelog.d/10897.misc b/changelog.d/10897.misc
new file mode 100644
index 0000000000..586a0b3a96
--- /dev/null
+++ b/changelog.d/10897.misc
@@ -0,0 +1 @@
+Use direct references to config flags.
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index f657f11f76..548f6dcde9 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -88,8 +88,8 @@ def start_worker_reactor(appname, config, run_command=reactor.run):
appname,
soft_file_limit=config.soft_file_limit,
gc_thresholds=config.gc_thresholds,
- pid_file=config.worker_pid_file,
- daemonize=config.worker_daemonize,
+ pid_file=config.worker.worker_pid_file,
+ daemonize=config.worker.worker_daemonize,
print_pidfile=config.print_pidfile,
logger=logger,
run_command=run_command,
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 259d5ec7cc..f2c5b75247 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -186,9 +186,9 @@ def start(config_options):
config.worker.worker_app = "synapse.app.admin_cmd"
if (
- not config.worker_daemonize
- and not config.worker_log_file
- and not config.worker_log_config
+ not config.worker.worker_daemonize
+ and not config.worker.worker_log_file
+ and not config.worker.worker_log_config
):
# Since we're meant to be run as a "command" let's not redirect stdio
# unless we've actually set log config.
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index e0776689ce..3036e1b4a0 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -140,7 +140,7 @@ class KeyUploadServlet(RestServlet):
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.http_client = hs.get_simple_http_client()
- self.main_uri = hs.config.worker_main_http_uri
+ self.main_uri = hs.config.worker.worker_main_http_uri
async def on_POST(self, request: Request, device_id: Optional[str]):
requester = await self.auth.get_user_by_req(request, allow_guest=True)
@@ -321,7 +321,7 @@ class GenericWorkerServer(HomeServer):
elif name == "federation":
resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
elif name == "media":
- if self.config.can_load_media_repo:
+ if self.config.media.can_load_media_repo:
media_repo = self.get_media_repository_resource()
# We need to serve the admin servlets for media on the
@@ -384,7 +384,7 @@ class GenericWorkerServer(HomeServer):
logger.info("Synapse worker now listening on port %d", port)
def start_listening(self):
- for listener in self.config.worker_listeners:
+ for listener in self.config.worker.worker_listeners:
if listener.type == "http":
self._listen_http(listener)
elif listener.type == "manhole":
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index f1769f146b..205831dcda 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -234,7 +234,7 @@ class SynapseHomeServer(HomeServer):
)
if name in ["media", "federation", "client"]:
- if self.config.enable_media_repo:
+ if self.config.media.enable_media_repo:
media_repo = self.get_media_repository_resource()
resources.update(
{MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo}
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index bf8ca7d5fe..0a08231e5a 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -322,7 +322,9 @@ def setup_logging(
"""
log_config_path = (
- config.worker_log_config if use_worker_options else config.logging.log_config
+ config.worker.worker_log_config
+ if use_worker_options
+ else config.logging.log_config
)
# Perform one-time logging configuration.
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
index d310976fe3..2a6110eb10 100644
--- a/synapse/crypto/context_factory.py
+++ b/synapse/crypto/context_factory.py
@@ -74,8 +74,8 @@ class ServerContextFactory(ContextFactory):
context.set_options(
SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1
)
- context.use_certificate_chain_file(config.tls_certificate_file)
- context.use_privatekey(config.tls_private_key)
+ context.use_certificate_chain_file(config.tls.tls_certificate_file)
+ context.use_privatekey(config.tls.tls_private_key)
# https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
context.set_cipher_list(
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 57f1d53fa8..19ee246f96 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -78,7 +78,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
"""
spam_checkers: List[Any] = []
api = hs.get_module_api()
- for module, config in hs.config.spam_checkers:
+ for module, config in hs.config.spamchecker.spam_checkers:
# Older spam checkers don't accept the `api` argument, so we
# try and detect support.
spam_args = inspect.getfullargspec(module)
diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index 7a6eb3e516..d94b1bb4d2 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -42,10 +42,10 @@ def load_legacy_third_party_event_rules(hs: "HomeServer"):
"""Wrapper that loads a third party event rules module configured using the old
configuration, and registers the hooks they implement.
"""
- if hs.config.third_party_event_rules is None:
+ if hs.config.thirdpartyrules.third_party_event_rules is None:
return
- module, config = hs.config.third_party_event_rules
+ module, config = hs.config.thirdpartyrules.third_party_event_rules
api = hs.get_module_api()
third_party_rules = module(config=config, module_api=api)
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 0f80dfdc43..a8c717efd5 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -277,23 +277,25 @@ class AuthHandler(BaseHandler):
# after the SSO completes and before redirecting them back to their client.
# It notifies the user they are about to give access to their matrix account
# to the client.
- self._sso_redirect_confirm_template = hs.config.sso_redirect_confirm_template
+ self._sso_redirect_confirm_template = (
+ hs.config.sso.sso_redirect_confirm_template
+ )
# The following template is shown during user interactive authentication
# in the fallback auth scenario. It notifies the user that they are
# authenticating for an operation to occur on their account.
- self._sso_auth_confirm_template = hs.config.sso_auth_confirm_template
+ self._sso_auth_confirm_template = hs.config.sso.sso_auth_confirm_template
# The following template is shown during the SSO authentication process if
# the account is deactivated.
self._sso_account_deactivated_template = (
- hs.config.sso_account_deactivated_template
+ hs.config.sso.sso_account_deactivated_template
)
self._server_name = hs.config.server.server_name
# cast to tuple for use with str.startswith
- self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist)
+ self._whitelisted_sso_clients = tuple(hs.config.sso.sso_client_whitelist)
# A mapping of user ID to extra attributes to include in the login
# response.
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index d487fee627..5cfba3c817 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -48,7 +48,7 @@ class DirectoryHandler(BaseHandler):
self.event_creation_handler = hs.get_event_creation_handler()
self.store = hs.get_datastore()
self.config = hs.config
- self.enable_room_list_search = hs.config.enable_room_list_search
+ self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
self.require_membership = hs.config.require_membership_for_aliases
self.third_party_event_rules = hs.get_third_party_event_rules()
@@ -143,7 +143,7 @@ class DirectoryHandler(BaseHandler):
):
raise AuthError(403, "This user is not permitted to create this alias")
- if not self.config.is_alias_creation_allowed(
+ if not self.config.roomdirectory.is_alias_creation_allowed(
user_id, room_id, room_alias_str
):
# Lets just return a generic message, as there may be all sorts of
@@ -459,7 +459,7 @@ class DirectoryHandler(BaseHandler):
if canonical_alias:
room_aliases.append(canonical_alias)
- if not self.config.is_publishing_room_allowed(
+ if not self.config.roomdirectory.is_publishing_room_allowed(
user_id, room_id, room_aliases
):
# Lets just return a generic message, as there may be all sorts of
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 4523b25636..b17ef2a9a1 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -91,7 +91,7 @@ class FederationHandler(BaseHandler):
self.spam_checker = hs.get_spam_checker()
self.event_creation_handler = hs.get_event_creation_handler()
self._event_auth_handler = hs.get_event_auth_handler()
- self._server_notices_mxid = hs.config.server_notices_mxid
+ self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
self.config = hs.config
self.http_client = hs.get_proxied_blacklisted_http_client()
self._replication = hs.get_replication_data_handler()
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index ad4e4a3d6f..c66aefe2c4 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -692,10 +692,10 @@ class EventCreationHandler:
return False
async def _is_server_notices_room(self, room_id: str) -> bool:
- if self.config.server_notices_mxid is None:
+ if self.config.servernotices.server_notices_mxid is None:
return False
user_ids = await self.store.get_users_in_room(room_id)
- return self.config.server_notices_mxid in user_ids
+ return self.config.servernotices.server_notices_mxid in user_ids
async def assert_accepted_privacy_policy(self, requester: Requester) -> None:
"""Check if a user has accepted the privacy policy
@@ -731,8 +731,8 @@ class EventCreationHandler:
# exempt the system notices user
if (
- self.config.server_notices_mxid is not None
- and user_id == self.config.server_notices_mxid
+ self.config.servernotices.server_notices_mxid is not None
+ and user_id == self.config.servernotices.server_notices_mxid
):
return
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 01c5e1385d..4f99f137a2 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -98,7 +98,7 @@ class RegistrationHandler(BaseHandler):
self.macaroon_gen = hs.get_macaroon_generator()
self._account_validity_handler = hs.get_account_validity_handler()
self._user_consent_version = self.hs.config.consent.user_consent_version
- self._server_notices_mxid = hs.config.server_notices_mxid
+ self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
self._server_name = hs.hostname
self.spam_checker = hs.get_spam_checker()
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index b5768220d9..408b7d7b74 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -126,7 +126,7 @@ class RoomCreationHandler(BaseHandler):
for preset_name, preset_config in self._presets_dict.items():
encrypted = (
preset_name
- in self.config.encryption_enabled_by_default_for_room_presets
+ in self.config.room.encryption_enabled_by_default_for_room_presets
)
preset_config["encrypted"] = encrypted
@@ -141,7 +141,7 @@ class RoomCreationHandler(BaseHandler):
self._upgrade_response_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
hs.get_clock(), "room_upgrade", timeout_ms=FIVE_MINUTES_IN_MS
)
- self._server_notices_mxid = hs.config.server_notices_mxid
+ self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
self.third_party_event_rules = hs.get_third_party_event_rules()
@@ -757,7 +757,9 @@ class RoomCreationHandler(BaseHandler):
)
if is_public:
- if not self.config.is_publishing_room_allowed(user_id, room_id, room_alias):
+ if not self.config.roomdirectory.is_publishing_room_allowed(
+ user_id, room_id, room_alias
+ ):
# Lets just return a generic message, as there may be all sorts of
# reasons why we said no. TODO: Allow configurable error messages
# per alias creation rule?
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index c83ff585e3..c3d4199ed1 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -52,7 +52,7 @@ EMPTY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)
class RoomListHandler(BaseHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
- self.enable_room_list_search = hs.config.enable_room_list_search
+ self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
self.response_cache: ResponseCache[
Tuple[Optional[int], Optional[str], Optional[ThirdPartyInstanceID]]
] = ResponseCache(hs.get_clock(), "room_list")
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 7bb3f0bc47..1a56c82fbd 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -88,7 +88,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.clock = hs.get_clock()
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
- self._server_notices_mxid = self.config.server_notices_mxid
+ self._server_notices_mxid = self.config.servernotices.server_notices_mxid
self._enable_lookup = hs.config.enable_3pid_lookup
self.allow_per_room_profiles = self.config.allow_per_room_profiles
diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py
index 185befbe9f..2fed9f377a 100644
--- a/synapse/handlers/saml.py
+++ b/synapse/handlers/saml.py
@@ -54,19 +54,18 @@ class Saml2SessionData:
class SamlHandler(BaseHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
- self._saml_client = Saml2Client(hs.config.saml2_sp_config)
- self._saml_idp_entityid = hs.config.saml2_idp_entityid
+ self._saml_client = Saml2Client(hs.config.saml2.saml2_sp_config)
+ self._saml_idp_entityid = hs.config.saml2.saml2_idp_entityid
- self._saml2_session_lifetime = hs.config.saml2_session_lifetime
+ self._saml2_session_lifetime = hs.config.saml2.saml2_session_lifetime
self._grandfathered_mxid_source_attribute = (
- hs.config.saml2_grandfathered_mxid_source_attribute
+ hs.config.saml2.saml2_grandfathered_mxid_source_attribute
)
self._saml2_attribute_requirements = hs.config.saml2.attribute_requirements
- self._error_template = hs.config.sso_error_template
# plugin to do custom mapping from saml response to mxid
- self._user_mapping_provider = hs.config.saml2_user_mapping_provider_class(
- hs.config.saml2_user_mapping_provider_config,
+ self._user_mapping_provider = hs.config.saml2.saml2_user_mapping_provider_class(
+ hs.config.saml2.saml2_user_mapping_provider_config,
ModuleApi(hs, hs.get_auth_handler()),
)
@@ -411,7 +410,7 @@ class DefaultSamlMappingProvider:
self._mxid_mapper = parsed_config.mxid_mapper
self._grandfathered_mxid_source_attribute = (
- module_api._hs.config.saml2_grandfathered_mxid_source_attribute
+ module_api._hs.config.saml2.saml2_grandfathered_mxid_source_attribute
)
def get_remote_user_id(
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index e044251a13..49fde01cf0 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -184,15 +184,17 @@ class SsoHandler:
self._server_name = hs.hostname
self._registration_handler = hs.get_registration_handler()
self._auth_handler = hs.get_auth_handler()
- self._error_template = hs.config.sso_error_template
- self._bad_user_template = hs.config.sso_auth_bad_user_template
+ self._error_template = hs.config.sso.sso_error_template
+ self._bad_user_template = hs.config.sso.sso_auth_bad_user_template
self._profile_handler = hs.get_profile_handler()
# The following template is shown after a successful user interactive
# authentication session. It tells the user they can close the window.
- self._sso_auth_success_template = hs.config.sso_auth_success_template
+ self._sso_auth_success_template = hs.config.sso.sso_auth_success_template
- self._sso_update_profile_information = hs.config.sso_update_profile_information
+ self._sso_update_profile_information = (
+ hs.config.sso.sso_update_profile_information
+ )
# a lock on the mappings
self._mapping_lock = Linearizer(name="sso_user_mapping", clock=hs.get_clock())
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 9fc53333fc..bd3e6f2ec7 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -46,7 +46,7 @@ class StatsHandler:
self.notifier = hs.get_notifier()
self.is_mine_id = hs.is_mine_id
- self.stats_enabled = hs.config.stats_enabled
+ self.stats_enabled = hs.config.stats.stats_enabled
# The current position in the current_state_delta stream
self.pos: Optional[int] = None
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 8dc46d7674..b91e7cb501 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -61,7 +61,7 @@ class UserDirectoryHandler(StateDeltasHandler):
self.notifier = hs.get_notifier()
self.is_mine_id = hs.is_mine_id
self.update_user_directory = hs.config.update_user_directory
- self.search_all_users = hs.config.user_directory_search_all_users
+ self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
self.spam_checker = hs.get_spam_checker()
# The current position in the current_state_delta stream
self.pos: Optional[int] = None
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index c6c4d3bd29..03d2dd94f6 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -363,7 +363,7 @@ def noop_context_manager(*args, **kwargs):
def init_tracer(hs: "HomeServer"):
"""Set the whitelists and initialise the JaegerClient tracer"""
global opentracing
- if not hs.config.opentracer_enabled:
+ if not hs.config.tracing.opentracer_enabled:
# We don't have a tracer
opentracing = None
return
@@ -377,12 +377,12 @@ def init_tracer(hs: "HomeServer"):
# Pull out the jaeger config if it was given. Otherwise set it to something sensible.
# See https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/config.py
- set_homeserver_whitelist(hs.config.opentracer_whitelist)
+ set_homeserver_whitelist(hs.config.tracing.opentracer_whitelist)
from jaeger_client.metrics.prometheus import PrometheusMetricsFactory
config = JaegerConfig(
- config=hs.config.jaeger_config,
+ config=hs.config.tracing.jaeger_config,
service_name=f"{hs.config.server.server_name} {hs.get_instance_name()}",
scope_manager=LogContextScopeManager(hs.config),
metrics_factory=PrometheusMetricsFactory(),
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index 25589b0042..f1b78d09f9 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -168,8 +168,8 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
client = hs.get_simple_http_client()
local_instance_name = hs.get_instance_name()
- master_host = hs.config.worker_replication_host
- master_port = hs.config.worker_replication_http_port
+ master_host = hs.config.worker.worker_replication_host
+ master_port = hs.config.worker.worker_replication_http_port
instance_map = hs.config.worker.instance_map
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index 509ed7fb13..1438a82b60 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -322,8 +322,8 @@ class ReplicationCommandHandler:
else:
client_name = hs.get_instance_name()
self._factory = DirectTcpReplicationClientFactory(hs, client_name, self)
- host = hs.config.worker_replication_host
- port = hs.config.worker_replication_port
+ host = hs.config.worker.worker_replication_host
+ port = hs.config.worker.worker_replication_port
hs.get_reactor().connectTCP(host.encode(), port, self._factory)
def get_streams(self) -> Dict[str, Stream]:
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index a03774c98a..e1506deb2b 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -267,7 +267,7 @@ def register_servlets_for_client_rest_resource(
# Load the media repo ones if we're using them. Otherwise load the servlets which
# don't need a media repo (typically readonly admin APIs).
- if hs.config.can_load_media_repo:
+ if hs.config.media.can_load_media_repo:
register_servlets_for_media_repo(hs, http_server)
else:
ListMediaInRoom(hs).register(http_server)
diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py
index 64446fc486..fa5c173f4b 100644
--- a/synapse/rest/client/login.py
+++ b/synapse/rest/client/login.py
@@ -76,7 +76,7 @@ class LoginRestServlet(RestServlet):
self.jwt_audiences = hs.config.jwt.jwt_audiences
# SSO configuration.
- self.saml2_enabled = hs.config.saml2_enabled
+ self.saml2_enabled = hs.config.saml2.saml2_enabled
self.cas_enabled = hs.config.cas.cas_enabled
self.oidc_enabled = hs.config.oidc.oidc_enabled
self._msc2918_enabled = hs.config.access_token_lifetime is not None
diff --git a/synapse/rest/client/user_directory.py b/synapse/rest/client/user_directory.py
index 8852811114..a47d9bd01d 100644
--- a/synapse/rest/client/user_directory.py
+++ b/synapse/rest/client/user_directory.py
@@ -58,7 +58,7 @@ class UserDirectorySearchRestServlet(RestServlet):
requester = await self.auth.get_user_by_req(request, allow_guest=False)
user_id = requester.user.to_string()
- if not self.hs.config.user_directory_search_enabled:
+ if not self.hs.config.userdirectory.user_directory_search_enabled:
return 200, {"limited": False, "results": []}
body = parse_json_object_from_request(request)
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index a1a815cf82..b52a296d8f 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -42,15 +42,15 @@ class VersionsRestServlet(RestServlet):
# Calculate these once since they shouldn't change after start-up.
self.e2ee_forced_public = (
RoomCreationPreset.PUBLIC_CHAT
- in self.config.encryption_enabled_by_default_for_room_presets
+ in self.config.room.encryption_enabled_by_default_for_room_presets
)
self.e2ee_forced_private = (
RoomCreationPreset.PRIVATE_CHAT
- in self.config.encryption_enabled_by_default_for_room_presets
+ in self.config.room.encryption_enabled_by_default_for_room_presets
)
self.e2ee_forced_trusted_private = (
RoomCreationPreset.TRUSTED_PRIVATE_CHAT
- in self.config.encryption_enabled_by_default_for_room_presets
+ in self.config.room.encryption_enabled_by_default_for_room_presets
)
def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
diff --git a/synapse/rest/client/voip.py b/synapse/rest/client/voip.py
index 9d46ed3af3..ea2b8aa45f 100644
--- a/synapse/rest/client/voip.py
+++ b/synapse/rest/client/voip.py
@@ -37,14 +37,14 @@ class VoipRestServlet(RestServlet):
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(
- request, self.hs.config.turn_allow_guests
+ request, self.hs.config.voip.turn_allow_guests
)
- turnUris = self.hs.config.turn_uris
- turnSecret = self.hs.config.turn_shared_secret
- turnUsername = self.hs.config.turn_username
- turnPassword = self.hs.config.turn_password
- userLifetime = self.hs.config.turn_user_lifetime
+ turnUris = self.hs.config.voip.turn_uris
+ turnSecret = self.hs.config.voip.turn_shared_secret
+ turnUsername = self.hs.config.voip.turn_username
+ turnPassword = self.hs.config.voip.turn_password
+ userLifetime = self.hs.config.voip.turn_user_lifetime
if turnUris and turnSecret and userLifetime:
expiry = (self.hs.get_clock().time_msec() + userLifetime) / 1000
diff --git a/synapse/rest/media/v1/config_resource.py b/synapse/rest/media/v1/config_resource.py
index 712d4e8368..a95804d327 100644
--- a/synapse/rest/media/v1/config_resource.py
+++ b/synapse/rest/media/v1/config_resource.py
@@ -31,7 +31,7 @@ class MediaConfigResource(DirectServeJsonResource):
config = hs.config
self.clock = hs.get_clock()
self.auth = hs.get_auth()
- self.limits_dict = {"m.upload.size": config.max_upload_size}
+ self.limits_dict = {"m.upload.size": config.media.max_upload_size}
async def _async_render_GET(self, request: SynapseRequest) -> None:
await self.auth.get_user_by_req(request)
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index c1bd81100d..abd88a2d4f 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -76,16 +76,16 @@ class MediaRepository:
self.clock = hs.get_clock()
self.server_name = hs.hostname
self.store = hs.get_datastore()
- self.max_upload_size = hs.config.max_upload_size
- self.max_image_pixels = hs.config.max_image_pixels
+ self.max_upload_size = hs.config.media.max_upload_size
+ self.max_image_pixels = hs.config.media.max_image_pixels
Thumbnailer.set_limits(self.max_image_pixels)
- self.primary_base_path: str = hs.config.media_store_path
+ self.primary_base_path: str = hs.config.media.media_store_path
self.filepaths: MediaFilePaths = MediaFilePaths(self.primary_base_path)
- self.dynamic_thumbnails = hs.config.dynamic_thumbnails
- self.thumbnail_requirements = hs.config.thumbnail_requirements
+ self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
+ self.thumbnail_requirements = hs.config.media.thumbnail_requirements
self.remote_media_linearizer = Linearizer(name="media_remote")
@@ -100,7 +100,11 @@ class MediaRepository:
# potentially upload to.
storage_providers = []
- for clz, provider_config, wrapper_config in hs.config.media_storage_providers:
+ for (
+ clz,
+ provider_config,
+ wrapper_config,
+ ) in hs.config.media.media_storage_providers:
backend = clz(hs, provider_config)
provider = StorageProviderWrapper(
backend,
@@ -975,7 +979,7 @@ class MediaRepositoryResource(Resource):
def __init__(self, hs: "HomeServer"):
# If we're not configured to use it, raise if we somehow got here.
- if not hs.config.can_load_media_repo:
+ if not hs.config.media.can_load_media_repo:
raise ConfigError("Synapse is not configured to use a media repo.")
super().__init__()
@@ -986,7 +990,7 @@ class MediaRepositoryResource(Resource):
self.putChild(
b"thumbnail", ThumbnailResource(hs, media_repo, media_repo.media_storage)
)
- if hs.config.url_preview_enabled:
+ if hs.config.media.url_preview_enabled:
self.putChild(
b"preview_url",
PreviewUrlResource(hs, media_repo, media_repo.media_storage),
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 128706d297..0b0c4d6469 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -125,14 +125,14 @@ class PreviewUrlResource(DirectServeJsonResource):
self.auth = hs.get_auth()
self.clock = hs.get_clock()
self.filepaths = media_repo.filepaths
- self.max_spider_size = hs.config.max_spider_size
+ self.max_spider_size = hs.config.media.max_spider_size
self.server_name = hs.hostname
self.store = hs.get_datastore()
self.client = SimpleHttpClient(
hs,
treq_args={"browser_like_redirects": True},
- ip_whitelist=hs.config.url_preview_ip_range_whitelist,
- ip_blacklist=hs.config.url_preview_ip_range_blacklist,
+ ip_whitelist=hs.config.media.url_preview_ip_range_whitelist,
+ ip_blacklist=hs.config.media.url_preview_ip_range_blacklist,
use_proxy=True,
)
self.media_repo = media_repo
@@ -150,8 +150,8 @@ class PreviewUrlResource(DirectServeJsonResource):
or instance_running_jobs == hs.get_instance_name()
)
- self.url_preview_url_blacklist = hs.config.url_preview_url_blacklist
- self.url_preview_accept_language = hs.config.url_preview_accept_language
+ self.url_preview_url_blacklist = hs.config.media.url_preview_url_blacklist
+ self.url_preview_accept_language = hs.config.media.url_preview_accept_language
# memory cache mapping urls to an ObservableDeferred returning
# JSON-encoded OG metadata
diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py
index 6c9969e55f..289e4297f2 100644
--- a/synapse/rest/media/v1/storage_provider.py
+++ b/synapse/rest/media/v1/storage_provider.py
@@ -125,7 +125,7 @@ class FileStorageProviderBackend(StorageProvider):
def __init__(self, hs: "HomeServer", config: str):
self.hs = hs
- self.cache_directory = hs.config.media_store_path
+ self.cache_directory = hs.config.media.media_store_path
self.base_directory = config
def __str__(self) -> str:
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index cb2f88676e..ed91ef5a42 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -53,7 +53,7 @@ class ThumbnailResource(DirectServeJsonResource):
self.store = hs.get_datastore()
self.media_repo = media_repo
self.media_storage = media_storage
- self.dynamic_thumbnails = hs.config.dynamic_thumbnails
+ self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
self.server_name = hs.hostname
async def _async_render_GET(self, request: SynapseRequest) -> None:
diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py
index 39b29318bb..7dcb1428e4 100644
--- a/synapse/rest/media/v1/upload_resource.py
+++ b/synapse/rest/media/v1/upload_resource.py
@@ -41,7 +41,7 @@ class UploadResource(DirectServeJsonResource):
self.clock = hs.get_clock()
self.server_name = hs.hostname
self.auth = hs.get_auth()
- self.max_upload_size = hs.config.max_upload_size
+ self.max_upload_size = hs.config.media.max_upload_size
self.clock = hs.get_clock()
async def _async_render_OPTIONS(self, request: SynapseRequest) -> None:
diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py
index 086c80b723..6ad558f5d1 100644
--- a/synapse/rest/synapse/client/__init__.py
+++ b/synapse/rest/synapse/client/__init__.py
@@ -50,7 +50,7 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc
resources["/_synapse/client/oidc"] = OIDCResource(hs)
- if hs.config.saml2_enabled:
+ if hs.config.saml2.saml2_enabled:
from synapse.rest.synapse.client.saml2 import SAML2Resource
res = SAML2Resource(hs)
diff --git a/synapse/rest/synapse/client/saml2/metadata_resource.py b/synapse/rest/synapse/client/saml2/metadata_resource.py
index 64378ed57b..d8eae3970d 100644
--- a/synapse/rest/synapse/client/saml2/metadata_resource.py
+++ b/synapse/rest/synapse/client/saml2/metadata_resource.py
@@ -30,7 +30,7 @@ class SAML2MetadataResource(Resource):
def __init__(self, hs: "HomeServer"):
Resource.__init__(self)
- self.sp_config = hs.config.saml2_sp_config
+ self.sp_config = hs.config.saml2.saml2_sp_config
def render_GET(self, request: Request) -> bytes:
metadata_xml = saml2.metadata.create_metadata_string(
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index d87a538917..cd1c5ff6f4 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -39,7 +39,7 @@ class ServerNoticesManager:
self._server_name = hs.hostname
self._notifier = hs.get_notifier()
- self.server_notices_mxid = self._config.server_notices_mxid
+ self.server_notices_mxid = self._config.servernotices.server_notices_mxid
def is_enabled(self):
"""Checks if server notices are enabled on this server.
@@ -47,7 +47,7 @@ class ServerNoticesManager:
Returns:
bool
"""
- return self._config.server_notices_mxid is not None
+ return self.server_notices_mxid is not None
async def send_notice(
self,
@@ -71,9 +71,9 @@ class ServerNoticesManager:
room_id = await self.get_or_create_notice_room_for_user(user_id)
await self.maybe_invite_user_to_room(user_id, room_id)
- system_mxid = self._config.server_notices_mxid
+ assert self.server_notices_mxid is not None
requester = create_requester(
- system_mxid, authenticated_entity=self._server_name
+ self.server_notices_mxid, authenticated_entity=self._server_name
)
logger.info("Sending server notice to %s", user_id)
@@ -81,7 +81,7 @@ class ServerNoticesManager:
event_dict = {
"type": type,
"room_id": room_id,
- "sender": system_mxid,
+ "sender": self.server_notices_mxid,
"content": event_content,
}
@@ -106,7 +106,7 @@ class ServerNoticesManager:
Returns:
room id of notice room.
"""
- if not self.is_enabled():
+ if self.server_notices_mxid is None:
raise Exception("Server notices not enabled")
assert self._is_mine_id(user_id), "Cannot send server notices to remote users"
@@ -139,12 +139,12 @@ class ServerNoticesManager:
# avatar, we have to use both.
join_profile = None
if (
- self._config.server_notices_mxid_display_name is not None
- or self._config.server_notices_mxid_avatar_url is not None
+ self._config.servernotices.server_notices_mxid_display_name is not None
+ or self._config.servernotices.server_notices_mxid_avatar_url is not None
):
join_profile = {
- "displayname": self._config.server_notices_mxid_display_name,
- "avatar_url": self._config.server_notices_mxid_avatar_url,
+ "displayname": self._config.servernotices.server_notices_mxid_display_name,
+ "avatar_url": self._config.servernotices.server_notices_mxid_avatar_url,
}
requester = create_requester(
@@ -154,7 +154,7 @@ class ServerNoticesManager:
requester,
config={
"preset": RoomCreationPreset.PRIVATE_CHAT,
- "name": self._config.server_notices_room_name,
+ "name": self._config.servernotices.server_notices_room_name,
"power_level_content_override": {"users_default": -10},
},
ratelimit=False,
@@ -178,6 +178,7 @@ class ServerNoticesManager:
user_id: The ID of the user to invite.
room_id: The ID of the room to invite the user to.
"""
+ assert self.server_notices_mxid is not None
requester = create_requester(
self.server_notices_mxid, authenticated_entity=self._server_name
)
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 52ef9deede..c83089ee63 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -2015,7 +2015,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
(user_id_obj.localpart, create_profile_with_displayname),
)
- if self.hs.config.stats_enabled:
+ if self.hs.config.stats.stats_enabled:
# we create a new completed user statistics row
# we don't strictly need current_token since this user really can't
diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py
index 343d6efc92..e20033bb28 100644
--- a/synapse/storage/databases/main/stats.py
+++ b/synapse/storage/databases/main/stats.py
@@ -98,7 +98,7 @@ class StatsStore(StateDeltasStore):
self.server_name = hs.hostname
self.clock = self.hs.get_clock()
- self.stats_enabled = hs.config.stats_enabled
+ self.stats_enabled = hs.config.stats.stats_enabled
self.stats_delta_processing_lock = DeferredLock()
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 7ca04237a5..90d65edc42 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -551,7 +551,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
super().__init__(database, db_conn, hs)
self._prefer_local_users_in_search = (
- hs.config.user_directory_search_prefer_local_users
+ hs.config.userdirectory.user_directory_search_prefer_local_users
)
self._server_name = hs.config.server.server_name
@@ -741,7 +741,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
}
"""
- if self.hs.config.user_directory_search_all_users:
+ if self.hs.config.userdirectory.user_directory_search_all_users:
join_args = (user_id,)
where_clause = "user_id != ?"
else:
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index a0a48b564e..6a2e76ca4a 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -405,7 +405,9 @@ class TestCreateAliasACL(unittest.HomeserverTestCase):
rd_config = RoomDirectoryConfig()
rd_config.read_config(config)
- self.hs.config.is_alias_creation_allowed = rd_config.is_alias_creation_allowed
+ self.hs.config.roomdirectory.is_alias_creation_allowed = (
+ rd_config.is_alias_creation_allowed
+ )
return hs
diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
index 1ba4c05b9b..24b7ef6efc 100644
--- a/tests/handlers/test_stats.py
+++ b/tests/handlers/test_stats.py
@@ -118,7 +118,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.assertEqual(len(r), 0)
# Disable stats
- self.hs.config.stats_enabled = False
+ self.hs.config.stats.stats_enabled = False
self.handler.stats_enabled = False
u1 = self.register_user("u1", "pass")
@@ -134,7 +134,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.assertEqual(len(r), 0)
# Enable stats
- self.hs.config.stats_enabled = True
+ self.hs.config.stats.stats_enabled = True
self.handler.stats_enabled = True
# Do the initial population of the user directory via the background update
@@ -469,7 +469,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
behaviour eventually to still keep current rows.
"""
- self.hs.config.stats_enabled = False
+ self.hs.config.stats.stats_enabled = False
self.handler.stats_enabled = False
u1 = self.register_user("u1", "pass")
@@ -481,7 +481,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.assertIsNone(self._get_current_stats("room", r1))
self.assertIsNone(self._get_current_stats("user", u1))
- self.hs.config.stats_enabled = True
+ self.hs.config.stats.stats_enabled = True
self.handler.stats_enabled = True
self._perform_background_initial_update()
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index ba32585a14..266333c553 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -451,7 +451,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
visible.
"""
self.handler.search_all_users = True
- self.hs.config.user_directory_search_all_users = True
+ self.hs.config.userdirectory.user_directory_search_all_users = True
u1 = self.register_user("user1", "pass")
self.register_user("user2", "pass")
@@ -607,7 +607,7 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
return hs
def test_disabling_room_list(self):
- self.config.user_directory_search_enabled = True
+ self.config.userdirectory.user_directory_search_enabled = True
# First we create a room with another user so that user dir is non-empty
# for our user
@@ -624,7 +624,7 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
self.assertTrue(len(channel.json_body["results"]) > 0)
# Disable user directory and check search returns nothing
- self.config.user_directory_search_enabled = False
+ self.config.userdirectory.user_directory_search_enabled = False
channel = self.make_request(
"POST", b"user_directory/search", b'{"search_term":"user2"}'
)
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index f813866073..ce30a19213 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -43,7 +43,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
- self.filepaths = MediaFilePaths(hs.config.media_store_path)
+ self.filepaths = MediaFilePaths(hs.config.media.media_store_path)
def test_no_auth(self):
"""
@@ -200,7 +200,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
- self.filepaths = MediaFilePaths(hs.config.media_store_path)
+ self.filepaths = MediaFilePaths(hs.config.media.media_store_path)
self.url = "/_synapse/admin/v1/media/%s/delete" % self.server_name
def test_no_auth(self):
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index e79e0e1850..ee3ae9cce4 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -2473,7 +2473,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
self.media_repo = hs.get_media_repository_resource()
- self.filepaths = MediaFilePaths(hs.config.media_store_path)
+ self.filepaths = MediaFilePaths(hs.config.media.media_store_path)
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py
index 44a643d506..4ae00755c9 100644
--- a/tests/rest/media/v1/test_media_storage.py
+++ b/tests/rest/media/v1/test_media_storage.py
@@ -53,7 +53,7 @@ class MediaStorageTests(unittest.HomeserverTestCase):
self.primary_base_path = os.path.join(self.test_dir, "primary")
self.secondary_base_path = os.path.join(self.test_dir, "secondary")
- hs.config.media_store_path = self.primary_base_path
+ hs.config.media.media_store_path = self.primary_base_path
storage_providers = [FileStorageProviderBackend(hs, self.secondary_base_path)]
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index 8701b5f7e3..7f25200a5d 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -326,7 +326,7 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase):
for event in events:
if (
event["type"] == EventTypes.Message
- and event["sender"] == self.hs.config.server_notices_mxid
+ and event["sender"] == self.hs.config.servernotices.server_notices_mxid
):
notice_in_room = True
--
cgit 1.5.1
From 0420d4e6a5ceb58a453ce0761a15cd8e144da650 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 24 Sep 2021 14:01:45 +0100
Subject: Stop trying to auth/persist events whose auth events we do not have.
(#10907)
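In outline, event preparation now returns `None` when an auth event cannot be
found, and those `None` results are filtered out before persisting. A minimal
sketch of that filtering step (illustrative names, not Synapse's actual
helpers):

```python
from typing import Iterable, Optional, Tuple, TypeVar

T = TypeVar("T")

def drop_unprepared(results: Iterable[Optional[T]]) -> Tuple[T, ...]:
    # An event whose auth events cannot be found prepares to None.
    # Missing is not the same as nonexistent, so such events are
    # skipped rather than rejected.
    return tuple(x for x in results if x)

assert drop_unprepared(["event1", None, "event2"]) == ("event1", "event2")
```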
---
changelog.d/10907.bugfix | 1 +
synapse/handlers/federation_event.py | 24 ++++++++++++++++--------
2 files changed, 17 insertions(+), 8 deletions(-)
create mode 100644 changelog.d/10907.bugfix
diff --git a/changelog.d/10907.bugfix b/changelog.d/10907.bugfix
new file mode 100644
index 0000000000..601b341f9f
--- /dev/null
+++ b/changelog.d/10907.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug which could cause events pulled over federation to be incorrectly rejected.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 8fd9e51044..01fd841122 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -1194,10 +1194,17 @@ class FederationEventHandler:
auth = {}
for auth_event_id in event.auth_event_ids():
ae = persisted_events.get(auth_event_id)
- if ae:
- auth[(ae.type, ae.state_key)] = ae
- else:
- logger.info("Missing auth event %s", auth_event_id)
+ if not ae:
+ logger.warning(
+ "Event %s relies on auth_event %s, which could not be found.",
+ event,
+ auth_event_id,
+ )
+ # the fact we can't find the auth event doesn't mean it doesn't
+ # exist, which means it is premature to reject `event`. Instead we
+ # just ignore it for now.
+ return None
+ auth[(ae.type, ae.state_key)] = ae
context = EventContext.for_outlier()
context = await self._check_event_auth(
@@ -1208,8 +1215,10 @@ class FederationEventHandler:
)
return event, context
- events_to_persist = await yieldable_gather_results(prep, fetched_events)
- await self.persist_events_and_notify(room_id, events_to_persist)
+ events_to_persist = (
+ x for x in await yieldable_gather_results(prep, fetched_events) if x
+ )
+ await self.persist_events_and_notify(room_id, tuple(events_to_persist))
async def _check_event_auth(
self,
@@ -1235,8 +1244,7 @@ class FederationEventHandler:
claimed_auth_event_map:
A map of (type, state_key) => event for the event's claimed auth_events.
- Possibly incomplete, and possibly including events that are not yet
- persisted, or authed, or in the right room.
+ Possibly including events that were rejected, or are in the wrong room.
Only populated when populating outliers.
--
cgit 1.5.1
From ea01d4c2de65f29cf23e2d28786bfc10bd5fd881 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Fri, 24 Sep 2021 15:27:09 +0100
Subject: Update postgresql testing script (#10906)
- Use sytest:bionic. Sytest:latest is two years old (do we want
CI to push out latest at all?) and comes with Python 3.5, which we
explicitly no longer support. The script now runs under PostgreSQL 10
as a result.
- Advertise script in the docs
- Move pg testing script to scripts-dev directory
- Write to host as the script's executor, not root
A few changes to make it speedier to re-run the tests:
- Create blank DB in the container, not the script, so we don't have to
`initdb` each time
- Use a named volume to persist the tox environment, so we don't have to
fetch and install a bunch of packages from PyPI each time
Co-authored-by: reivilibre
---
.gitignore | 1 +
changelog.d/10906.misc | 1 +
docker/Dockerfile-pgtests | 24 ++++++++++++++---
docker/run_pg_tests.sh | 7 +++--
docs/development/contributing_guide.md | 47 ++++++++++++++++++++++++++++++++++
scripts-dev/test_postgresql.sh | 19 ++++++++++++++
test_postgresql.sh | 12 ---------
7 files changed, 92 insertions(+), 19 deletions(-)
create mode 100644 changelog.d/10906.misc
create mode 100755 scripts-dev/test_postgresql.sh
delete mode 100755 test_postgresql.sh
diff --git a/.gitignore b/.gitignore
index 6b9257b5c9..fe137f3370 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,6 +40,7 @@ __pycache__/
/.coverage*
/.mypy_cache/
/.tox
+/.tox-pg-container
/build/
/coverage.*
/dist/
diff --git a/changelog.d/10906.misc b/changelog.d/10906.misc
new file mode 100644
index 0000000000..20a1cbfbd0
--- /dev/null
+++ b/changelog.d/10906.misc
@@ -0,0 +1 @@
+Update development testing script `test_postgresql.sh` to use a supported Python version and make re-runs quicker.
\ No newline at end of file
diff --git a/docker/Dockerfile-pgtests b/docker/Dockerfile-pgtests
index 3bfee845c6..92b804d193 100644
--- a/docker/Dockerfile-pgtests
+++ b/docker/Dockerfile-pgtests
@@ -1,6 +1,6 @@
# Use the Sytest image that comes with a lot of the build dependencies
# pre-installed
-FROM matrixdotorg/sytest:latest
+FROM matrixdotorg/sytest:bionic
# The Sytest image doesn't come with python, so install that
RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
@@ -8,5 +8,23 @@ RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
# We need tox to run the tests in run_pg_tests.sh
RUN python3 -m pip install tox
-ADD run_pg_tests.sh /pg_tests.sh
-ENTRYPOINT /pg_tests.sh
+# Initialise the db
+RUN su -c '/usr/lib/postgresql/10/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="C.UTF-8" --lc-ctype="C.UTF-8" --username=postgres' postgres
+
+# Add a user with our UID and GID so that files get created on the host owned
+# by us, not root.
+ARG UID
+ARG GID
+RUN groupadd --gid $GID user
+RUN useradd --uid $UID --gid $GID --groups sudo --no-create-home user
+
+# Ensure we can start postgres by sudo-ing as the postgres user.
+RUN apt-get update && apt-get -qq install -y sudo
+RUN echo "user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+ADD run_pg_tests.sh /run_pg_tests.sh
+# Use the "exec form" of ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint)
+# so that we can `docker run` this container and pass arguments to run_pg_tests.sh
+ENTRYPOINT ["/run_pg_tests.sh"]
+
+USER user
diff --git a/docker/run_pg_tests.sh b/docker/run_pg_tests.sh
index 1fd08cb62b..58e2177d34 100755
--- a/docker/run_pg_tests.sh
+++ b/docker/run_pg_tests.sh
@@ -10,11 +10,10 @@ set -e
# Set PGUSER so Synapse's tests know what user to connect to the database with
export PGUSER=postgres
-# Initialise & start the database
-su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres
-su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
+# Start the database
+sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/data start
# Run the tests
cd /src
export TRIAL_FLAGS="-j 4"
-tox --workdir=/tmp -e py35-postgres
+tox --workdir=./.tox-pg-container -e py36-postgres "$@"
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 97352b0f26..713366368c 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -170,6 +170,53 @@ To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`:
SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
```
+### Running tests under PostgreSQL
+
+Invoking `trial` as above will use an in-memory SQLite database. This is great for
+quick development and testing. However, we recommend using a PostgreSQL database
+in production (and indeed, we have some code paths specific to each database).
+This means that we need to run our unit tests against PostgreSQL too. Our CI does
+this automatically for pull requests and release candidates, but it's sometimes
+useful to reproduce this locally.
+
+To do so, [configure Postgres](../postgres.md) and run `trial` with the
+following environment variables matching your configuration:
+
+- `SYNAPSE_POSTGRES` to anything nonempty
+- `SYNAPSE_POSTGRES_HOST`
+- `SYNAPSE_POSTGRES_USER`
+- `SYNAPSE_POSTGRES_PASSWORD`
+
+For example:
+
+```shell
+export SYNAPSE_POSTGRES=1
+export SYNAPSE_POSTGRES_HOST=localhost
+export SYNAPSE_POSTGRES_USER=postgres
+export SYNAPSE_POSTGRES_PASSWORD=mydevenvpassword
+trial
+```
+
+#### Prebuilt container
+
+Since configuring PostgreSQL can be fiddly, we can make use of a pre-made
+Docker container to set up PostgreSQL and run our tests for us. To do so, run
+
+```shell
+scripts-dev/test_postgresql.sh
+```
+
+Any extra arguments to the script will be passed to `tox` and then to `trial`,
+so we can run a specific test in this container with e.g.
+
+```shell
+scripts-dev/test_postgresql.sh tests.replication.test_sharded_event_persister.EventPersisterShardTestCase
+```
+
+The container creates a folder in your Synapse checkout called
+`.tox-pg-container` and uses this as a tox environment. The output of any
+`trial` runs goes into `_trial_temp` in your synapse source directory — the same
+as running `trial` directly on your host machine.
## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).
diff --git a/scripts-dev/test_postgresql.sh b/scripts-dev/test_postgresql.sh
new file mode 100755
index 0000000000..43cfa256e4
--- /dev/null
+++ b/scripts-dev/test_postgresql.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+# This script builds the Docker image to run the PostgreSQL tests, and then runs
+# the tests. It uses a dedicated tox environment so that we don't have to
+# rebuild it each time.
+
+# Command line arguments to this script are forwarded to "tox" and then to "trial".
+
+set -e
+
+# Build, and tag
+docker build docker/ \
+ --build-arg "UID=$(id -u)" \
+ --build-arg "GID=$(id -g)" \
+ -f docker/Dockerfile-pgtests \
+ -t synapsepgtests
+
+# Run, mounting the current directory into /src
+docker run --rm -it -v "$(pwd):/src" -v synapse-pg-test-tox:/tox synapsepgtests "$@"
diff --git a/test_postgresql.sh b/test_postgresql.sh
deleted file mode 100755
index c10828fbbc..0000000000
--- a/test_postgresql.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-# This script builds the Docker image to run the PostgreSQL tests, and then runs
-# the tests.
-
-set -e
-
-# Build, and tag
-docker build docker/ -f docker/Dockerfile-pgtests -t synapsepgtests
-
-# Run, mounting the current directory into /src
-docker run --rm -it -v $(pwd)\:/src synapsepgtests
--
cgit 1.5.1
From b10257e87972d158f4b6a0c7d1fe7239014ea10a Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Fri, 24 Sep 2021 16:38:23 +0200
Subject: Add a spamchecker callback to allow or deny room creation based on
invites (#10898)
This is in the context of creating new module callbacks that modules in https://github.com/matrix-org/synapse-dinsic can use, in an effort to reconcile the spam checker API in synapse-dinsic with the one in mainline.
This adds a callback that's fairly similar to `user_may_create_room`, except it also allows processing based on the invites sent at room creation.
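As a hedged illustration, a module implementing the new callback might look
like the following (the ten-invite limit is an arbitrary example policy; the
signature matches the documentation added below):

```python
from typing import Dict, List

async def user_may_create_room_with_invites(
    user: str,
    invites: List[str],
    threepid_invites: List[Dict[str, str]],
) -> bool:
    # Example policy: deny room creation requests that bundle more than
    # ten invites (Matrix users plus 3PID invites combined).
    return len(invites) + len(threepid_invites) <= 10
```

A module would typically register this via
`ModuleApi.register_spam_checker_callbacks`, alongside `user_may_create_room`
so that room upgrades (which send no invites) are also covered.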
---
changelog.d/10898.feature | 1 +
docs/modules/spam_checker_callbacks.md | 29 ++++++++
synapse/events/spamcheck.py | 42 ++++++++++++
synapse/handlers/room.py | 14 ++--
tests/rest/client/test_rooms.py | 119 ++++++++++++++++++++++++++++++++-
5 files changed, 199 insertions(+), 6 deletions(-)
create mode 100644 changelog.d/10898.feature
diff --git a/changelog.d/10898.feature b/changelog.d/10898.feature
new file mode 100644
index 0000000000..97fa39fd0c
--- /dev/null
+++ b/changelog.d/10898.feature
@@ -0,0 +1 @@
+Add a `user_may_create_room_with_invites` spam checker callback to allow modules to allow or deny a room creation request based on the invites and/or 3PID invites it includes.
diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
index 81574a015c..7920ac5f8f 100644
--- a/docs/modules/spam_checker_callbacks.md
+++ b/docs/modules/spam_checker_callbacks.md
@@ -38,6 +38,35 @@ async def user_may_create_room(user: str) -> bool
Called when processing a room creation request. The module must return a `bool` indicating
whether the given user (represented by their Matrix user ID) is allowed to create a room.
+### `user_may_create_room_with_invites`
+
+```python
+async def user_may_create_room_with_invites(
+ user: str,
+ invites: List[str],
+ threepid_invites: List[Dict[str, str]],
+) -> bool
+```
+
+Called when processing a room creation request (right after `user_may_create_room`).
+The module is given the Matrix user ID of the user trying to create a room, as well as a
+list of Matrix users to invite and a list of third-party identifiers (3PID, e.g. email
+addresses) to invite.
+
+A Matrix user to invite is represented by their Matrix user ID, and a 3PID to invite
+is represented by a dict with a `medium` key holding the 3PID's medium (e.g. "email")
+and an `address` key holding its address (e.g. "alice@example.com").
+
+See [the Matrix specification](https://matrix.org/docs/spec/appendices#pid-types) for more
+information regarding third-party identifiers.
+
+If no invites and/or 3PID invites were specified in the room creation request, the
+corresponding list(s) will be empty.
+
+**Note**: This callback is not called when a room is cloned (e.g. during a room upgrade)
+since no invites are sent when cloning a room. To cover this case, modules also need to
+implement `user_may_create_room`.
+
### `user_may_create_room_alias`
```python
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 19ee246f96..c389f70b8d 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -46,6 +46,9 @@ CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
]
USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]]
USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]]
+USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK = Callable[
+ [str, List[str], List[Dict[str, str]]], Awaitable[bool]
+]
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[[str, RoomAlias], Awaitable[bool]]
USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[Dict[str, str]], Awaitable[bool]]
@@ -164,6 +167,9 @@ class SpamChecker:
self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = []
+ self._user_may_create_room_with_invites_callbacks: List[
+ USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK
+ ] = []
self._user_may_create_room_alias_callbacks: List[
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
] = []
@@ -183,6 +189,9 @@ class SpamChecker:
check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None,
+ user_may_create_room_with_invites: Optional[
+ USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK
+ ] = None,
user_may_create_room_alias: Optional[
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
] = None,
@@ -203,6 +212,11 @@ class SpamChecker:
if user_may_create_room is not None:
self._user_may_create_room_callbacks.append(user_may_create_room)
+ if user_may_create_room_with_invites is not None:
+ self._user_may_create_room_with_invites_callbacks.append(
+ user_may_create_room_with_invites,
+ )
+
if user_may_create_room_alias is not None:
self._user_may_create_room_alias_callbacks.append(
user_may_create_room_alias,
@@ -283,6 +297,34 @@ class SpamChecker:
return True
+ async def user_may_create_room_with_invites(
+ self,
+ userid: str,
+ invites: List[str],
+ threepid_invites: List[Dict[str, str]],
+ ) -> bool:
+ """Checks if a given user may create a room with invites
+
+ If this method returns false, the creation request will be rejected.
+
+ Args:
+ userid: The ID of the user attempting to create a room
+ invites: The IDs of the Matrix users to be invited if the room creation is
+ allowed.
+ threepid_invites: The threepids to be invited if the room creation is allowed,
+ as a dict including a "medium" key indicating the threepid's medium (e.g.
+ "email") and an "address" key indicating the threepid's address (e.g.
+ "alice@example.com")
+
+ Returns:
+ True if the user may create the room, otherwise False
+ """
+ for callback in self._user_may_create_room_with_invites_callbacks:
+ if await callback(userid, invites, threepid_invites) is False:
+ return False
+
+ return True
+
async def user_may_create_room_alias(
self, userid: str, room_alias: RoomAlias
) -> bool:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 408b7d7b74..8fede5e935 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -649,8 +649,16 @@ class RoomCreationHandler(BaseHandler):
requester, config, is_requester_admin=is_requester_admin
)
- if not is_requester_admin and not await self.spam_checker.user_may_create_room(
- user_id
+ invite_3pid_list = config.get("invite_3pid", [])
+ invite_list = config.get("invite", [])
+
+ if not is_requester_admin and not (
+ await self.spam_checker.user_may_create_room(user_id)
+ and await self.spam_checker.user_may_create_room_with_invites(
+ user_id,
+ invite_list,
+ invite_3pid_list,
+ )
):
raise SynapseError(403, "You are not permitted to create rooms")
@@ -684,8 +692,6 @@ class RoomCreationHandler(BaseHandler):
if mapping:
raise SynapseError(400, "Room alias already taken", Codes.ROOM_IN_USE)
- invite_3pid_list = config.get("invite_3pid", [])
- invite_list = config.get("invite", [])
for i in invite_list:
try:
uid = UserID.from_string(i)
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index ef847f0f5f..30bdaa9c27 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -18,7 +18,7 @@
"""Tests REST events for /rooms paths."""
import json
-from typing import Iterable
+from typing import Dict, Iterable, List, Optional
from unittest.mock import Mock, call
from urllib import parse as urlparse
@@ -30,7 +30,7 @@ from synapse.api.errors import Codes, HttpResponseException
from synapse.handlers.pagination import PurgeStatus
from synapse.rest import admin
from synapse.rest.client import account, directory, login, profile, room, sync
-from synapse.types import JsonDict, RoomAlias, UserID, create_requester
+from synapse.types import JsonDict, Requester, RoomAlias, UserID, create_requester
from synapse.util.stringutils import random_string
from tests import unittest
@@ -669,6 +669,121 @@ class RoomsCreateTestCase(RoomBase):
channel = self.make_request("POST", "/createRoom", content)
self.assertEqual(200, channel.code)
+ def test_spamchecker_invites(self):
+ """Tests the user_may_create_room_with_invites spam checker callback."""
+
+ # Mock do_3pid_invite, so we don't fail from failing to send a 3PID invite to an
+ # IS.
+ async def do_3pid_invite(
+ room_id: str,
+ inviter: UserID,
+ medium: str,
+ address: str,
+ id_server: str,
+ requester: Requester,
+ txn_id: Optional[str],
+ id_access_token: Optional[str] = None,
+ ) -> int:
+ return 0
+
+ do_3pid_invite_mock = Mock(side_effect=do_3pid_invite)
+ self.hs.get_room_member_handler().do_3pid_invite = do_3pid_invite_mock
+
+ # Add a mock callback for user_may_create_room_with_invites. Make it allow any
+ # room creation request for now.
+ return_value = True
+
+ async def user_may_create_room_with_invites(
+ user: str,
+ invites: List[str],
+ threepid_invites: List[Dict[str, str]],
+ ) -> bool:
+ return return_value
+
+ callback_mock = Mock(side_effect=user_may_create_room_with_invites)
+ self.hs.get_spam_checker()._user_may_create_room_with_invites_callbacks.append(
+ callback_mock,
+ )
+
+ # The MXIDs we'll try to invite.
+ invited_mxids = [
+ "@alice1:red",
+ "@alice2:red",
+ "@alice3:red",
+ "@alice4:red",
+ ]
+
+ # The 3PIDs we'll try to invite.
+ invited_3pids = [
+ {
+ "id_server": "example.com",
+ "id_access_token": "sometoken",
+ "medium": "email",
+ "address": "alice1@example.com",
+ },
+ {
+ "id_server": "example.com",
+ "id_access_token": "sometoken",
+ "medium": "email",
+ "address": "alice2@example.com",
+ },
+ {
+ "id_server": "example.com",
+ "id_access_token": "sometoken",
+ "medium": "email",
+ "address": "alice3@example.com",
+ },
+ ]
+
+ # Create a room and invite the Matrix users, and check that it succeeded.
+ channel = self.make_request(
+ "POST",
+ "/createRoom",
+ json.dumps({"invite": invited_mxids}).encode("utf8"),
+ )
+ self.assertEqual(200, channel.code)
+
+ # Check that the callback was called with the right arguments.
+ expected_call_args = ((self.user_id, invited_mxids, []),)
+ self.assertEquals(
+ callback_mock.call_args,
+ expected_call_args,
+ callback_mock.call_args,
+ )
+
+ # Create a room and invite the 3PIDs, and check that it succeeded.
+ channel = self.make_request(
+ "POST",
+ "/createRoom",
+ json.dumps({"invite_3pid": invited_3pids}).encode("utf8"),
+ )
+ self.assertEqual(200, channel.code)
+
+ # Check that do_3pid_invite was called the right number of times
+ self.assertEquals(do_3pid_invite_mock.call_count, len(invited_3pids))
+
+ # Check that the callback was called with the right arguments.
+ expected_call_args = ((self.user_id, [], invited_3pids),)
+ self.assertEquals(
+ callback_mock.call_args,
+ expected_call_args,
+ callback_mock.call_args,
+ )
+
+ # Now deny any room creation.
+ return_value = False
+
+ # Create a room and invite the 3PIDs, and check that it failed.
+ channel = self.make_request(
+ "POST",
+ "/createRoom",
+ json.dumps({"invite_3pid": invited_3pids}).encode("utf8"),
+ )
+ self.assertEqual(403, channel.code)
+
+ # Check that do_3pid_invite wasn't called this time (the call count is unchanged).
+ self.assertEquals(do_3pid_invite_mock.call_count, len(invited_3pids))
+
class RoomTopicTestCase(RoomBase):
"""Tests /rooms/$room_id/topic REST events."""
--
cgit 1.5.1
From d138187045dd3c51689c19124d65ee62e37db755 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Fri, 24 Sep 2021 17:09:12 -0500
Subject: Document changes to schema version 61 - 64 (#10917)
As pointed out by @richvdh, https://github.com/matrix-org/synapse/pull/10838#discussion_r715424244
Retroactively summarize `61` - `64`
---
changelog.d/10917.misc | 1 +
synapse/storage/schema/__init__.py | 11 +++++++++++
2 files changed, 12 insertions(+)
create mode 100644 changelog.d/10917.misc
diff --git a/changelog.d/10917.misc b/changelog.d/10917.misc
new file mode 100644
index 0000000000..9ce6eef94b
--- /dev/null
+++ b/changelog.d/10917.misc
@@ -0,0 +1 @@
+Document and summarize changes in schema version `61` - `64`.
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index aa2ce44c6c..573e05a482 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -27,11 +27,22 @@ for more information on how this works.
Changes in SCHEMA_VERSION = 61:
- The `user_stats_historical` and `room_stats_historical` tables are not written and
are not read (previously, they were written but not read).
+ - MSC2716: Add `insertion_events` and `insertion_event_edges` tables to keep track
+ of insertion events in order to navigate historical chunks of messages.
+ - MSC2716: Add `chunk_events` table to track how the chunk is labeled and
+ which insertion event it points to.
+
+Changes in SCHEMA_VERSION = 62:
+ - MSC2716: Add `insertion_event_extremities` table that keeps track of which
+ insertion events need to be backfilled.
Changes in SCHEMA_VERSION = 63:
- The `public_room_list_stream` table is not written nor read to
(previously, it was written and read to, but not for any significant purpose).
https://github.com/matrix-org/synapse/pull/10565
+
+Changes in SCHEMA_VERSION = 64:
+ - MSC2716: Rename related tables and columns from "chunks" to "batches".
"""
--
cgit 1.5.1
From 6c83c2710760a4f551d1a925fc9b1a19ae8797c1 Mon Sep 17 00:00:00 2001
From: Sean Quah <8349537+squahtx@users.noreply.github.com>
Date: Mon, 27 Sep 2021 11:29:23 +0100
Subject: Fix race conditions when creating media store and config directories
(#10913)
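The fix replaces check-then-create patterns with
`os.makedirs(..., exist_ok=True)`. A small sketch of why (the path is
hypothetical):

```python
import os

dir_path = "/tmp/example-media-store"  # hypothetical path for illustration

# Before (racy): another process can create the directory between the
# exists() check and the makedirs() call, making makedirs() raise.
#   if not os.path.exists(dir_path):
#       os.makedirs(dir_path)

# After (race-free): exist_ok=True makes the call idempotent.
os.makedirs(dir_path, exist_ok=True)
```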
---
changelog.d/10913.bugfix | 1 +
synapse/config/_base.py | 9 ++-------
synapse/rest/media/v1/media_storage.py | 6 ++----
synapse/rest/media/v1/storage_provider.py | 3 +--
4 files changed, 6 insertions(+), 13 deletions(-)
create mode 100644 changelog.d/10913.bugfix
diff --git a/changelog.d/10913.bugfix b/changelog.d/10913.bugfix
new file mode 100644
index 0000000000..a0015c8241
--- /dev/null
+++ b/changelog.d/10913.bugfix
@@ -0,0 +1 @@
+Fix race conditions when creating media store and config directories.
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 2cc242782a..d974a1a2a8 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -200,11 +200,7 @@ class Config:
@classmethod
def ensure_directory(cls, dir_path):
dir_path = cls.abspath(dir_path)
- try:
- os.makedirs(dir_path)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
+ os.makedirs(dir_path, exist_ok=True)
if not os.path.isdir(dir_path):
raise ConfigError("%s is not a directory" % (dir_path,))
return dir_path
@@ -693,8 +689,7 @@ class RootConfig:
open_private_ports=config_args.open_private_ports,
)
- if not path_exists(config_dir_path):
- os.makedirs(config_dir_path)
+ os.makedirs(config_dir_path, exist_ok=True)
with open(config_path, "w") as config_file:
config_file.write(config_str)
config_file.write("\n\n# vim:ft=yaml")
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index 01fada8fb5..fca239d8c7 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -132,8 +132,7 @@ class MediaStorage:
fname = os.path.join(self.local_media_directory, path)
dirname = os.path.dirname(fname)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
+ os.makedirs(dirname, exist_ok=True)
finished_called = [False]
@@ -244,8 +243,7 @@ class MediaStorage:
return legacy_local_path
dirname = os.path.dirname(local_path)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
+ os.makedirs(dirname, exist_ok=True)
for provider in self.storage_providers:
res: Any = await provider.fetch(path, file_info)
diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py
index 289e4297f2..da78fcee5e 100644
--- a/synapse/rest/media/v1/storage_provider.py
+++ b/synapse/rest/media/v1/storage_provider.py
@@ -138,8 +138,7 @@ class FileStorageProviderBackend(StorageProvider):
backup_fname = os.path.join(self.base_directory, path)
dirname = os.path.dirname(backup_fname)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
+ os.makedirs(dirname, exist_ok=True)
await defer_to_thread(
self.hs.get_reactor(), shutil.copyfile, primary_fname, backup_fname
--
cgit 1.5.1
From f7768f62cbf7579a1a91e694f83d47d275373369 Mon Sep 17 00:00:00 2001
From: Sean Quah <8349537+squahtx@users.noreply.github.com>
Date: Mon, 27 Sep 2021 12:55:27 +0100
Subject: Avoid storing URL cache files in storage providers (#10911)
URL cache files are short-lived, and it does not make sense to offload
them (e.g. to the cloud) or back them up.
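In sketch form, the guard added to the storage provider wrapper
short-circuits on URL cache entries (a minimal stand-in for Synapse's
`FileInfo` is used here for illustration):

```python
from dataclasses import dataclass

@dataclass
class FileInfo:
    """Minimal stand-in for Synapse's FileInfo, for illustration only."""
    url_cache: bool

def should_use_storage_provider(file_info: FileInfo) -> bool:
    # Mirrors the new checks in StorageProviderWrapper.store_file and
    # .fetch: URL preview cache entries are never offloaded, and fetches
    # for them skip any potentially slow I/O or network access.
    return not file_info.url_cache

assert should_use_storage_provider(FileInfo(url_cache=False))
assert not should_use_storage_provider(FileInfo(url_cache=True))
```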
---
changelog.d/10911.bugfix | 1 +
docs/upgrade.md | 7 ++
synapse/rest/media/v1/filepath.py | 11 ++-
synapse/rest/media/v1/preview_url_resource.py | 1 -
synapse/rest/media/v1/storage_provider.py | 10 ++
tests/rest/media/v1/test_url_preview.py | 130 ++++++++++++++++++++++++++
6 files changed, 154 insertions(+), 6 deletions(-)
create mode 100644 changelog.d/10911.bugfix
diff --git a/changelog.d/10911.bugfix b/changelog.d/10911.bugfix
new file mode 100644
index 0000000000..96e36bb15a
--- /dev/null
+++ b/changelog.d/10911.bugfix
@@ -0,0 +1 @@
+Avoid storing URL cache files in storage providers. Server admins may safely delete the `url_cache/` and `url_cache_thumbnails/` directories from any configured storage providers to reclaim space.
diff --git a/docs/upgrade.md b/docs/upgrade.md
index f9b832cb3f..a8221372df 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -85,6 +85,13 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
+# Upgrading to v1.44.0
+
+## The URL preview cache is no longer mirrored to storage providers
+The `url_cache/` and `url_cache_thumbnails/` directories in the media store are
+no longer mirrored to storage providers. These two directories can be safely
+deleted from any configured storage providers to reclaim space.
+
# Upgrading to v1.43.0
## The spaces summary APIs can now be handled by workers
diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py
index 39bbe4e874..08bd85f664 100644
--- a/synapse/rest/media/v1/filepath.py
+++ b/synapse/rest/media/v1/filepath.py
@@ -195,23 +195,24 @@ class MediaFilePaths:
url_cache_thumbnail = _wrap_in_base_path(url_cache_thumbnail_rel)
- def url_cache_thumbnail_directory(self, media_id: str) -> str:
+ def url_cache_thumbnail_directory_rel(self, media_id: str) -> str:
# Media id is of the form
# E.g.: 2017-09-28-fsdRDt24DS234dsf
if NEW_FORMAT_ID_RE.match(media_id):
- return os.path.join(
- self.base_path, "url_cache_thumbnails", media_id[:10], media_id[11:]
- )
+ return os.path.join("url_cache_thumbnails", media_id[:10], media_id[11:])
else:
return os.path.join(
- self.base_path,
"url_cache_thumbnails",
media_id[0:2],
media_id[2:4],
media_id[4:],
)
+ url_cache_thumbnail_directory = _wrap_in_base_path(
+ url_cache_thumbnail_directory_rel
+ )
+
def url_cache_thumbnail_dirs_to_delete(self, media_id: str) -> List[str]:
"The dirs to try and remove if we delete the media_id thumbnails"
# Media id is of the form
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 0b0c4d6469..79a42b2455 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -485,7 +485,6 @@ class PreviewUrlResource(DirectServeJsonResource):
async def _expire_url_cache_data(self) -> None:
"""Clean up expired url cache content, media and thumbnails."""
- # TODO: Delete from backup media store
assert self._worker_run_media_background_jobs
diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py
index da78fcee5e..18bf977d3d 100644
--- a/synapse/rest/media/v1/storage_provider.py
+++ b/synapse/rest/media/v1/storage_provider.py
@@ -93,6 +93,11 @@ class StorageProviderWrapper(StorageProvider):
if file_info.server_name and not self.store_remote:
return None
+ if file_info.url_cache:
+ # The URL preview cache is short lived and not worth offloading or
+ # backing up.
+ return None
+
if self.store_synchronous:
# store_file is supposed to return an Awaitable, but guard
# against improper implementations.
@@ -110,6 +115,11 @@ class StorageProviderWrapper(StorageProvider):
run_in_background(store)
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
+ if file_info.url_cache:
+ # Files in the URL preview cache definitely aren't stored here,
+ # so avoid any potentially slow I/O or network access.
+ return None
+
# store_file is supposed to return an Awaitable, but guard
# against improper implementations.
return await maybe_awaitable(self.backend.fetch(path, file_info))
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index d83dfacfed..4d09b5d07e 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -21,6 +21,7 @@ from twisted.internet.error import DNSLookupError
from twisted.test.proto_helpers import AccumulatingProtocol
from synapse.config.oembed import OEmbedEndpointConfig
+from synapse.util.stringutils import parse_and_validate_mxc_uri
from tests import unittest
from tests.server import FakeTransport
@@ -721,3 +722,132 @@ class URLPreviewTests(unittest.HomeserverTestCase):
"og:description": "Content Preview",
},
)
+
+ def _download_image(self):
+ """Downloads an image into the URL cache.
+
+ Returns:
+ A (host, media_id) tuple representing the MXC URI of the image.
+ """
+ self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")]
+
+ channel = self.make_request(
+ "GET",
+ "preview_url?url=http://cdn.twitter.com/matrixdotorg",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\nContent-Type: image/png\r\n\r\n"
+ % (len(SMALL_PNG),)
+ + SMALL_PNG
+ )
+
+ self.pump()
+ self.assertEqual(channel.code, 200)
+ body = channel.json_body
+ mxc_uri = body["og:image"]
+ host, _port, media_id = parse_and_validate_mxc_uri(mxc_uri)
+ self.assertIsNone(_port)
+ return host, media_id
+
+ def test_storage_providers_exclude_files(self):
+ """Test that files are not stored in or fetched from storage providers."""
+ host, media_id = self._download_image()
+
+ rel_file_path = self.preview_url.filepaths.url_cache_filepath_rel(media_id)
+ media_store_path = os.path.join(self.media_store_path, rel_file_path)
+ storage_provider_path = os.path.join(self.storage_path, rel_file_path)
+
+ # Check storage
+ self.assertTrue(os.path.isfile(media_store_path))
+ self.assertFalse(
+ os.path.isfile(storage_provider_path),
+ "URL cache file was unexpectedly stored in a storage provider",
+ )
+
+ # Check fetching
+ channel = self.make_request(
+ "GET",
+ f"download/{host}/{media_id}",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+ self.assertEqual(channel.code, 200)
+
+ # Move cached file into the storage provider
+ os.makedirs(os.path.dirname(storage_provider_path), exist_ok=True)
+ os.rename(media_store_path, storage_provider_path)
+
+ channel = self.make_request(
+ "GET",
+ f"download/{host}/{media_id}",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+ self.assertEqual(
+ channel.code,
+ 404,
+ "URL cache file was unexpectedly retrieved from a storage provider",
+ )
+
+ def test_storage_providers_exclude_thumbnails(self):
+ """Test that thumbnails are not stored in or fetched from storage providers."""
+ host, media_id = self._download_image()
+
+ rel_thumbnail_path = (
+ self.preview_url.filepaths.url_cache_thumbnail_directory_rel(media_id)
+ )
+ media_store_thumbnail_path = os.path.join(
+ self.media_store_path, rel_thumbnail_path
+ )
+ storage_provider_thumbnail_path = os.path.join(
+ self.storage_path, rel_thumbnail_path
+ )
+
+ # Check storage
+ self.assertTrue(os.path.isdir(media_store_thumbnail_path))
+ self.assertFalse(
+ os.path.isdir(storage_provider_thumbnail_path),
+ "URL cache thumbnails were unexpectedly stored in a storage provider",
+ )
+
+ # Check fetching
+ channel = self.make_request(
+ "GET",
+ f"thumbnail/{host}/{media_id}?width=32&height=32&method=scale",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+ self.assertEqual(channel.code, 200)
+
+ # Remove the original, otherwise thumbnails will regenerate
+ rel_file_path = self.preview_url.filepaths.url_cache_filepath_rel(media_id)
+ media_store_path = os.path.join(self.media_store_path, rel_file_path)
+ os.remove(media_store_path)
+
+ # Move cached thumbnails into the storage provider
+ os.makedirs(os.path.dirname(storage_provider_thumbnail_path), exist_ok=True)
+ os.rename(media_store_thumbnail_path, storage_provider_thumbnail_path)
+
+ channel = self.make_request(
+ "GET",
+ f"thumbnail/{host}/{media_id}?width=32&height=32&method=scale",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+ self.assertEqual(
+ channel.code,
+ 404,
+ "URL cache thumbnail was unexpectedly retrieved from a storage provider",
+ )
--
cgit 1.5.1
From d37841787a9e152938ddb39af5bc1d93d04bc640 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 27 Sep 2021 15:39:49 +0100
Subject: Sign the git tag in release script (#10925)
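For context, GitPython forwards extra keyword arguments of `create_tag` to
`git tag`, so `sign=True` produces a GPG-signed annotated tag. A hedged usage
sketch (tag name and message are placeholders):

```python
from git import Repo  # GitPython, as used by scripts-dev/release.py

repo = Repo(".")
# sign=True is forwarded to `git tag` as --sign, creating a GPG-signed
# annotated tag; this requires a configured signing key.
repo.create_tag("v1.44.0rc1", message="placeholder release notes", sign=True)
```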
---
changelog.d/10925.misc | 1 +
scripts-dev/release.py | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/10925.misc
diff --git a/changelog.d/10925.misc b/changelog.d/10925.misc
new file mode 100644
index 0000000000..0c8027ecc2
--- /dev/null
+++ b/changelog.d/10925.misc
@@ -0,0 +1 @@
+Update release script to sign the newly created git tags.
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index a339260c43..ab2d860ab8 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -276,7 +276,7 @@ def tag(gh_token: Optional[str]):
if click.confirm("Edit text?", default=False):
changes = click.edit(changes, require_save=False)
- repo.create_tag(tag_name, message=changes)
+ repo.create_tag(tag_name, message=changes, sign=True)
if not click.confirm("Push tag to GitHub?", default=True):
print("")
--
cgit 1.5.1
From 707d5e4e48e839dabd34e4b67426fe8382a2c978 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 28 Sep 2021 10:37:58 +0100
Subject: Encode JSON responses on a thread in C, mk2 (#10905)
Currently we use `JsonEncoder.iterencode` to write JSON responses, which ensures that we don't block the main reactor thread when encoding huge objects. The downside to this is that `iterencode` falls back to using a pure Python encoder that is *much* less efficient and can easily burn a lot of CPU for huge responses. To fix this, while still ensuring we don't block the reactor loop, we encode the JSON on a threadpool using the standard `JsonEncoder.encode` function, which is backed by a C library.
Doing so, however, requires `respond_with_json` to have access to the reactor, which it previously didn't. There are two ways of doing this:
1. threading through the reactor object, which is a bit fiddly as e.g. `DirectServeJsonResource` doesn't currently take a reactor, but is exposed to modules and so is a PITA to change; or
2. exposing the reactor in `SynapseRequest`, which requires updating a bunch of servlet types.
I went with the latter as that is just a mechanical change, and I think it makes sense as a request already has a reactor associated with it (via its http channel).
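A minimal sketch of the two moving parts, using plain Twisted rather than
Synapse's logcontext-aware `defer_to_thread` wrapper (function names here are
illustrative):

```python
import json

from twisted.internet.threads import deferToThread

def encode_json_bytes(json_object) -> bytes:
    # The C-backed encoder: fast, but blocking, hence the threadpool.
    return json.dumps(json_object).encode("utf-8")

def encode_off_reactor(json_object):
    # Returns a Deferred that fires with the encoded bytes; the heavy
    # encoding work happens on Twisted's threadpool, not the reactor.
    return deferToThread(encode_json_bytes, json_object)

def chunk_seq(seq, maxlen):
    # Split the response bytes into fixed-size chunks so a producer can
    # stream them without handing the Request one huge write.
    return (seq[i : i + maxlen] for i in range(0, len(seq), maxlen))
```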
---
changelog.d/10905.feature | 1 +
synapse/http/server.py | 72 +++++++++++++++++++++++++++++++++++----------
synapse/push/emailpusher.py | 2 +-
synapse/util/iterutils.py | 19 ++++++++++--
4 files changed, 76 insertions(+), 18 deletions(-)
create mode 100644 changelog.d/10905.feature
diff --git a/changelog.d/10905.feature b/changelog.d/10905.feature
new file mode 100644
index 0000000000..07e7b2c6a7
--- /dev/null
+++ b/changelog.d/10905.feature
@@ -0,0 +1 @@
+Speed up responding with large JSON objects to requests.
diff --git a/synapse/http/server.py b/synapse/http/server.py
index e28b56abb9..1a50305dcf 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -21,7 +21,6 @@ import types
import urllib
from http import HTTPStatus
from inspect import isawaitable
-from io import BytesIO
from typing import (
Any,
Awaitable,
@@ -37,7 +36,7 @@ from typing import (
)
import jinja2
-from canonicaljson import iterencode_canonical_json
+from canonicaljson import encode_canonical_json
from typing_extensions import Protocol
from zope.interface import implementer
@@ -45,7 +44,7 @@ from twisted.internet import defer, interfaces
from twisted.python import failure
from twisted.web import resource
from twisted.web.server import NOT_DONE_YET, Request
-from twisted.web.static import File, NoRangeStaticProducer
+from twisted.web.static import File
from twisted.web.util import redirectTo
from synapse.api.errors import (
@@ -56,10 +55,11 @@ from synapse.api.errors import (
UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
-from synapse.logging.context import preserve_fn
+from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background
from synapse.logging.opentracing import trace_servlet
from synapse.util import json_encoder
from synapse.util.caches import intern_dict
+from synapse.util.iterutils import chunk_seq
logger = logging.getLogger(__name__)
@@ -620,12 +620,11 @@ class _ByteProducer:
self._request = None
-def _encode_json_bytes(json_object: Any) -> Iterator[bytes]:
+def _encode_json_bytes(json_object: Any) -> bytes:
"""
Encode an object into JSON. Returns the encoded bytes.
"""
- for chunk in json_encoder.iterencode(json_object):
- yield chunk.encode("utf-8")
+ return json_encoder.encode(json_object).encode("utf-8")
def respond_with_json(
@@ -659,7 +658,7 @@ def respond_with_json(
return None
if canonical_json:
- encoder = iterencode_canonical_json
+ encoder = encode_canonical_json
else:
encoder = _encode_json_bytes
@@ -670,7 +669,9 @@ def respond_with_json(
if send_cors:
set_cors_headers(request)
- _ByteProducer(request, encoder(json_object))
+ run_in_background(
+ _async_write_json_to_request_in_thread, request, encoder, json_object
+ )
return NOT_DONE_YET
@@ -706,15 +707,56 @@ def respond_with_json_bytes(
if send_cors:
set_cors_headers(request)
- # note that this is zero-copy (the bytesio shares a copy-on-write buffer with
- # the original `bytes`).
- bytes_io = BytesIO(json_bytes)
-
- producer = NoRangeStaticProducer(request, bytes_io)
- producer.start()
+ _write_bytes_to_request(request, json_bytes)
return NOT_DONE_YET
+async def _async_write_json_to_request_in_thread(
+ request: SynapseRequest,
+ json_encoder: Callable[[Any], bytes],
+ json_object: Any,
+):
+ """Encodes the given JSON object on a thread and then writes it to the
+ request.
+
+ This is done so that encoding large JSON objects doesn't block the reactor
+ thread.
+
+ Note: We don't use JsonEncoder.iterencode here as that falls back to the
+ Python implementation (rather than the C backend), which is *much* more
+ expensive.
+ """
+
+ json_str = await defer_to_thread(request.reactor, json_encoder, json_object)
+
+ _write_bytes_to_request(request, json_str)
+
+
+def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None:
+ """Writes the bytes to the request using an appropriate producer.
+
+ Note: This should be used instead of `Request.write` to correctly handle
+ large response bodies.
+ """
+
+ # The problem with dumping all of the response into the `Request` object at
+ # once (via `Request.write`) is that doing so starts the timeout for the
+ # next request to be received: so if it takes longer than 60s to stream back
+ # the response to the client, the client never gets it.
+ #
+ # The correct solution is to use a Producer; then the timeout is only
+ # started once all of the content is sent over the TCP connection.
+
+ # To make sure we don't write all of the bytes at once we split it up into
+ # chunks.
+ chunk_size = 4096
+ bytes_generator = chunk_seq(bytes_to_write, chunk_size)
+
+ # We use a `_ByteProducer` here rather than `NoRangeStaticProducer` as the
+ # unit tests can't cope with being given a pull producer.
+ _ByteProducer(request, bytes_generator)
+
+
def set_cors_headers(request: Request):
"""Set the CORS headers so that javascript running in a web browsers can
use this API
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index e08e125cb8..cf5abdfbda 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -184,7 +184,7 @@ class EmailPusher(Pusher):
should_notify_at = max(notif_ready_at, room_ready_at)
- if should_notify_at < self.clock.time_msec():
+ if should_notify_at <= self.clock.time_msec():
# one of our notifications is ready for sending, so we send
# *one* email updating the user on their notifications,
# we then consider all previously outstanding notifications
diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py
index 8ac3eab2f5..4938ddf703 100644
--- a/synapse/util/iterutils.py
+++ b/synapse/util/iterutils.py
@@ -21,13 +21,28 @@ from typing import (
Iterable,
Iterator,
Mapping,
- Sequence,
Set,
+ Sized,
Tuple,
TypeVar,
)
+from typing_extensions import Protocol
+
T = TypeVar("T")
+S = TypeVar("S", bound="_SelfSlice")
+
+
+class _SelfSlice(Sized, Protocol):
+ """A helper protocol that matches types where taking a slice results in the
+ same type being returned.
+
+ This is more specific than `Sequence`, which allows another `Sequence` to be
+ returned.
+ """
+
+ def __getitem__(self: S, i: slice) -> S:
+ ...
def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
@@ -46,7 +61,7 @@ def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
return iter(lambda: tuple(islice(sourceiter, size)), ())
-def chunk_seq(iseq: Sequence[T], maxlen: int) -> Iterable[Sequence[T]]:
+def chunk_seq(iseq: S, maxlen: int) -> Iterator[S]:
"""Split the given sequence into chunks of the given size
The last chunk may be shorter than the given size.
--
cgit 1.5.1
From a8bbf085761095c49b04af1a08fc67b1a781617d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 28 Sep 2021 12:13:51 +0100
Subject: Fix debian package builds. (#10931)
This was due to dh-virtualenv builds being broken by Sphinx removing
deprecated APIs.
---
changelog.d/10931.bugfix | 1 +
docker/Dockerfile-dhvirtualenv | 5 +++--
2 files changed, 4 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/10931.bugfix
diff --git a/changelog.d/10931.bugfix b/changelog.d/10931.bugfix
new file mode 100644
index 0000000000..3f30c9ccf1
--- /dev/null
+++ b/changelog.d/10931.bugfix
@@ -0,0 +1 @@
+Fix debian builds due to dh-virtualenv no longer being able to build their docs.
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
index 017be8555e..1dd88140c7 100644
--- a/docker/Dockerfile-dhvirtualenv
+++ b/docker/Dockerfile-dhvirtualenv
@@ -47,8 +47,9 @@ RUN apt-get update -qq -o Acquire::Languages=none \
&& cd /dh-virtualenv \
&& env DEBIAN_FRONTEND=noninteractive mk-build-deps -ri -t "apt-get -y --no-install-recommends"
-# build it
-RUN cd /dh-virtualenv && dpkg-buildpackage -us -uc -b
+# Build it. Note that building the docs doesn't work due to differences in
+# Sphinx APIs across versions/distros.
+RUN cd /dh-virtualenv && DEB_BUILD_OPTIONS=nodoc dpkg-buildpackage -us -uc -b
###
### Stage 1
--
cgit 1.5.1
From 3c50192d3f564ecc2e70441157f309610bbee1cd Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 28 Sep 2021 13:42:21 +0100
Subject: 1.44.0rc1
---
CHANGES.md | 72 +++++++++++++++++++++++++++++++++++++++++++++++
changelog.d/10659.misc | 1 -
changelog.d/10690.bugfix | 1 -
changelog.d/10776.feature | 1 -
changelog.d/10777.misc | 1 -
changelog.d/10782.bugfix | 1 -
changelog.d/10785.misc | 1 -
changelog.d/10796.misc | 1 -
changelog.d/10807.bugfix | 1 -
changelog.d/10810.bugfix | 1 -
changelog.d/10812.misc | 1 -
changelog.d/10814.feature | 1 -
changelog.d/10815.misc | 1 -
changelog.d/10816.misc | 1 -
changelog.d/10817.misc | 1 -
changelog.d/10819.feature | 1 -
changelog.d/10820.misc | 1 -
changelog.d/10823.misc | 1 -
changelog.d/10826.misc | 2 --
changelog.d/10827.bugfix | 1 -
changelog.d/10829.misc | 1 -
changelog.d/10831.misc | 1 -
changelog.d/10833.misc | 1 -
changelog.d/10834.misc | 1 -
changelog.d/10835.misc | 1 -
changelog.d/10838.misc | 1 -
changelog.d/10839.misc | 1 -
changelog.d/10843.bugfix | 1 -
changelog.d/10845.doc | 1 -
changelog.d/10856.misc | 1 -
changelog.d/10859.bugfix | 1 -
changelog.d/10865.doc | 1 -
changelog.d/10867.misc | 1 -
changelog.d/10868.feature | 1 -
changelog.d/10869.doc | 1 -
changelog.d/10873.bugfix | 1 -
changelog.d/10875.bugfix | 1 -
changelog.d/10879.misc | 1 -
changelog.d/10880.misc | 1 -
changelog.d/10881.bugfix | 1 -
changelog.d/10883.misc | 1 -
changelog.d/10884.misc | 1 -
changelog.d/10885.misc | 1 -
changelog.d/10887.bugfix | 1 -
changelog.d/10889.misc | 1 -
changelog.d/10891.misc | 1 -
changelog.d/10893.misc | 1 -
changelog.d/10896.misc | 1 -
changelog.d/10897.misc | 1 -
changelog.d/10898.feature | 1 -
changelog.d/10901.misc | 1 -
changelog.d/10905.feature | 1 -
changelog.d/10906.misc | 1 -
changelog.d/10907.bugfix | 1 -
changelog.d/10911.bugfix | 1 -
changelog.d/10913.bugfix | 1 -
changelog.d/10917.misc | 1 -
changelog.d/10925.misc | 1 -
changelog.d/10931.bugfix | 1 -
debian/changelog | 6 ++++
synapse/__init__.py | 2 +-
61 files changed, 79 insertions(+), 60 deletions(-)
delete mode 100644 changelog.d/10659.misc
delete mode 100644 changelog.d/10690.bugfix
delete mode 100644 changelog.d/10776.feature
delete mode 100644 changelog.d/10777.misc
delete mode 100644 changelog.d/10782.bugfix
delete mode 100644 changelog.d/10785.misc
delete mode 100644 changelog.d/10796.misc
delete mode 100644 changelog.d/10807.bugfix
delete mode 100644 changelog.d/10810.bugfix
delete mode 100644 changelog.d/10812.misc
delete mode 100644 changelog.d/10814.feature
delete mode 100644 changelog.d/10815.misc
delete mode 100644 changelog.d/10816.misc
delete mode 100644 changelog.d/10817.misc
delete mode 100644 changelog.d/10819.feature
delete mode 100644 changelog.d/10820.misc
delete mode 100644 changelog.d/10823.misc
delete mode 100644 changelog.d/10826.misc
delete mode 100644 changelog.d/10827.bugfix
delete mode 100644 changelog.d/10829.misc
delete mode 100644 changelog.d/10831.misc
delete mode 100644 changelog.d/10833.misc
delete mode 100644 changelog.d/10834.misc
delete mode 100644 changelog.d/10835.misc
delete mode 100644 changelog.d/10838.misc
delete mode 100644 changelog.d/10839.misc
delete mode 100644 changelog.d/10843.bugfix
delete mode 100644 changelog.d/10845.doc
delete mode 100644 changelog.d/10856.misc
delete mode 100644 changelog.d/10859.bugfix
delete mode 100644 changelog.d/10865.doc
delete mode 100644 changelog.d/10867.misc
delete mode 100644 changelog.d/10868.feature
delete mode 100644 changelog.d/10869.doc
delete mode 100644 changelog.d/10873.bugfix
delete mode 100644 changelog.d/10875.bugfix
delete mode 100644 changelog.d/10879.misc
delete mode 100644 changelog.d/10880.misc
delete mode 100644 changelog.d/10881.bugfix
delete mode 100644 changelog.d/10883.misc
delete mode 100644 changelog.d/10884.misc
delete mode 100644 changelog.d/10885.misc
delete mode 100644 changelog.d/10887.bugfix
delete mode 100644 changelog.d/10889.misc
delete mode 100644 changelog.d/10891.misc
delete mode 100644 changelog.d/10893.misc
delete mode 100644 changelog.d/10896.misc
delete mode 100644 changelog.d/10897.misc
delete mode 100644 changelog.d/10898.feature
delete mode 100644 changelog.d/10901.misc
delete mode 100644 changelog.d/10905.feature
delete mode 100644 changelog.d/10906.misc
delete mode 100644 changelog.d/10907.bugfix
delete mode 100644 changelog.d/10911.bugfix
delete mode 100644 changelog.d/10913.bugfix
delete mode 100644 changelog.d/10917.misc
delete mode 100644 changelog.d/10925.misc
delete mode 100644 changelog.d/10931.bugfix
diff --git a/CHANGES.md b/CHANGES.md
index 652f4b7955..da4d98ac2b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,75 @@
+Synapse 1.44.0rc1 (2021-09-28)
+==============================
+
+Features
+--------
+
+- Only allow the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send?chunk_id=xxx` endpoint to connect to an already existing insertion event. ([\#10776](https://github.com/matrix-org/synapse/issues/10776))
+- Improve oEmbed previews by processing the author name, photo, and video information. ([\#10814](https://github.com/matrix-org/synapse/issues/10814), [\#10819](https://github.com/matrix-org/synapse/issues/10819))
+- Speed up responding with large JSON objects to requests. ([\#10868](https://github.com/matrix-org/synapse/issues/10868), [\#10905](https://github.com/matrix-org/synapse/issues/10905))
+- Add a `user_may_create_room_with_invites` spam checker callback to allow modules to allow or deny a room creation request based on the invites and/or 3PID invites it includes. ([\#10898](https://github.com/matrix-org/synapse/issues/10898))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug that caused an `AssertionError` when purging history in certain rooms. Contributed by @Kokokokoka. ([\#10690](https://github.com/matrix-org/synapse/issues/10690))
+- Fix a long-standing bug which caused deactivated users that were later reactivated to be missing from the user directory. ([\#10782](https://github.com/matrix-org/synapse/issues/10782))
+- Allow sending a membership event to unban a user. Contributed by @aaronraimist. ([\#10807](https://github.com/matrix-org/synapse/issues/10807))
+- Fix a case where logging contexts would go missing when federation requests time out. ([\#10810](https://github.com/matrix-org/synapse/issues/10810))
+- Fix error in deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters. ([\#10827](https://github.com/matrix-org/synapse/issues/10827))
+- Fix a bug causing the `remove_stale_pushers` background job to repeatedly fail and log errors. This bug affected Synapse servers that had been upgraded from version 1.28 or older and are using SQLite. ([\#10843](https://github.com/matrix-org/synapse/issues/10843))
+- Fix a bug in Unicode support of the room search admin API. It is now possible to search for rooms with non-ASCII characters. ([\#10859](https://github.com/matrix-org/synapse/issues/10859))
+- Fix a bug introduced in Synapse 1.37.0 which caused `knock` events which we sent to remote servers to be incorrectly stored in the local database. ([\#10873](https://github.com/matrix-org/synapse/issues/10873))
+- Fix invalidating one-time key count cache after claiming keys. Contributed by Tulir at Beeper. ([\#10875](https://github.com/matrix-org/synapse/issues/10875))
+- Fix application service users being subject to MAU blocking if MAU had been reached, even if configured not to be blocked. ([\#10881](https://github.com/matrix-org/synapse/issues/10881))
+- Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). ([\#10887](https://github.com/matrix-org/synapse/issues/10887))
+- Fix a long-standing bug which could cause events pulled over federation to be incorrectly rejected. ([\#10907](https://github.com/matrix-org/synapse/issues/10907))
+- Avoid storing URL cache files in storage providers. Server admins may safely delete the `url_cache/` and `url_cache_thumbnails/` directories from any configured storage providers to reclaim space. ([\#10911](https://github.com/matrix-org/synapse/issues/10911))
+- Fix race conditions when creating media store and config directories. ([\#10913](https://github.com/matrix-org/synapse/issues/10913))
+- Fix debian builds due to dh-virtualenv no longer being able to build their docs. ([\#10931](https://github.com/matrix-org/synapse/issues/10931))
+
+
+Improved Documentation
+----------------------
+
+- Fix some crashes in the Module API example code, by adding JSON encoding/decoding. ([\#10845](https://github.com/matrix-org/synapse/issues/10845))
+- Add developer documentation about experimental configuration flags. ([\#10865](https://github.com/matrix-org/synapse/issues/10865))
+- Properly remove deleted files from GitHub pages when generating the documentation. ([\#10869](https://github.com/matrix-org/synapse/issues/10869))
+
+
+Internal Changes
+----------------
+
+- Fix GitHub Actions config so we can run sytest on synapse from parallel branches. ([\#10659](https://github.com/matrix-org/synapse/issues/10659))
+- Split out [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) meta events to their own fields in the `/batch_send` response. ([\#10777](https://github.com/matrix-org/synapse/issues/10777))
+- Add missing type hints to REST servlets. ([\#10785](https://github.com/matrix-org/synapse/issues/10785), [\#10817](https://github.com/matrix-org/synapse/issues/10817))
+- Simplify the internal logic which maintains the user directory database tables. ([\#10796](https://github.com/matrix-org/synapse/issues/10796))
+- Use direct references to config flags. ([\#10812](https://github.com/matrix-org/synapse/issues/10812), [\#10885](https://github.com/matrix-org/synapse/issues/10885), [\#10893](https://github.com/matrix-org/synapse/issues/10893), [\#10897](https://github.com/matrix-org/synapse/issues/10897))
+- Specify the type of token in generic "Invalid token" error messages. ([\#10815](https://github.com/matrix-org/synapse/issues/10815))
+- Make `StateFilter` frozen so it is hashable. ([\#10816](https://github.com/matrix-org/synapse/issues/10816))
+- Fix a long-standing bug where an `m.room.message` event containing a null byte would cause an internal server error. ([\#10820](https://github.com/matrix-org/synapse/issues/10820))
+- Add type hints to the state database. ([\#10823](https://github.com/matrix-org/synapse/issues/10823))
+- Opt out of cache expiry for `get_users_who_share_room_with_user`, to hopefully improve `/sync` performance when you
+ haven't synced recently. ([\#10826](https://github.com/matrix-org/synapse/issues/10826))
+- Track cache eviction rates more finely in Prometheus' monitoring. ([\#10829](https://github.com/matrix-org/synapse/issues/10829))
+- Add missing type hints to handlers. ([\#10831](https://github.com/matrix-org/synapse/issues/10831), [\#10856](https://github.com/matrix-org/synapse/issues/10856))
+- Extend the ModuleApi to let plug-ins check whether an ID is local and to access IP + User Agent data. ([\#10833](https://github.com/matrix-org/synapse/issues/10833))
+- Factor out PNG image data to a constant to be used in several tests. ([\#10834](https://github.com/matrix-org/synapse/issues/10834))
+- Add a test to ensure state events sent by modules get persisted correctly. ([\#10835](https://github.com/matrix-org/synapse/issues/10835))
+- Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) fields and event types from `chunk` to `batch` to match the `/batch_send` endpoint. ([\#10838](https://github.com/matrix-org/synapse/issues/10838))
+- Rename the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` query parameter from `?prev_event` to the more obvious `?prev_event_id`. ([\#10839](https://github.com/matrix-org/synapse/issues/10839))
+- Add type hints to `synapse.http.site`. ([\#10867](https://github.com/matrix-org/synapse/issues/10867))
+- Include outlier status when we log V2 or V3 events. ([\#10879](https://github.com/matrix-org/synapse/issues/10879))
+- Break down Grafana's cache expiry time series based on reason for eviction---see #10829. ([\#10880](https://github.com/matrix-org/synapse/issues/10880))
+- Clean up some of the federation event authentication code for clarity. ([\#10883](https://github.com/matrix-org/synapse/issues/10883), [\#10884](https://github.com/matrix-org/synapse/issues/10884), [\#10896](https://github.com/matrix-org/synapse/issues/10896), [\#10901](https://github.com/matrix-org/synapse/issues/10901))
+- Clean up some unnecessary parentheses in places around the codebase. ([\#10889](https://github.com/matrix-org/synapse/issues/10889))
+- Improve type hinting in the user directory code. ([\#10891](https://github.com/matrix-org/synapse/issues/10891))
+- Update development testing script `test_postgresql.sh` to use a supported Python version and make re-runs quicker. ([\#10906](https://github.com/matrix-org/synapse/issues/10906))
+- Document and summarize changes in schema version `61` - `64`. ([\#10917](https://github.com/matrix-org/synapse/issues/10917))
+- Update release script to sign the newly created git tags. ([\#10925](https://github.com/matrix-org/synapse/issues/10925))
+
+
Synapse 1.43.0 (2021-09-21)
===========================
diff --git a/changelog.d/10659.misc b/changelog.d/10659.misc
deleted file mode 100644
index d677a521c3..0000000000
--- a/changelog.d/10659.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix GitHub Actions config so we can run sytest on synapse from parallel branches.
\ No newline at end of file
diff --git a/changelog.d/10690.bugfix b/changelog.d/10690.bugfix
deleted file mode 100644
index 059eea7464..0000000000
--- a/changelog.d/10690.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug that caused an `AssertionError` when purging history in certain rooms. Contributed by @Kokokokoka.
diff --git a/changelog.d/10776.feature b/changelog.d/10776.feature
deleted file mode 100644
index aec0685a3d..0000000000
--- a/changelog.d/10776.feature
+++ /dev/null
@@ -1 +0,0 @@
-Only allow the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send?chunk_id=xxx` endpoint to connect to an already existing insertion event.
diff --git a/changelog.d/10777.misc b/changelog.d/10777.misc
deleted file mode 100644
index aed78a16f5..0000000000
--- a/changelog.d/10777.misc
+++ /dev/null
@@ -1 +0,0 @@
-Split out [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) meta events to their own fields in the `/batch_send` response.
diff --git a/changelog.d/10782.bugfix b/changelog.d/10782.bugfix
deleted file mode 100644
index 3e410447cc..0000000000
--- a/changelog.d/10782.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug which caused deactivated users that were later reactivated to be missing from the user directory.
\ No newline at end of file
diff --git a/changelog.d/10785.misc b/changelog.d/10785.misc
deleted file mode 100644
index 39a37b90b1..0000000000
--- a/changelog.d/10785.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add missing type hints to REST servlets.
diff --git a/changelog.d/10796.misc b/changelog.d/10796.misc
deleted file mode 100644
index 1873b2386a..0000000000
--- a/changelog.d/10796.misc
+++ /dev/null
@@ -1 +0,0 @@
-Simplify the internal logic which maintains the user directory database tables.
\ No newline at end of file
diff --git a/changelog.d/10807.bugfix b/changelog.d/10807.bugfix
deleted file mode 100644
index be03f5c738..0000000000
--- a/changelog.d/10807.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Allow sending a membership event to unban a user. Contributed by @aaronraimist.
\ No newline at end of file
diff --git a/changelog.d/10810.bugfix b/changelog.d/10810.bugfix
deleted file mode 100644
index 43e91f1f51..0000000000
--- a/changelog.d/10810.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a case where logging contexts would go missing when federation requests time out.
diff --git a/changelog.d/10812.misc b/changelog.d/10812.misc
deleted file mode 100644
index 586a0b3a96..0000000000
--- a/changelog.d/10812.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use direct references to config flags.
diff --git a/changelog.d/10814.feature b/changelog.d/10814.feature
deleted file mode 100644
index 4fa95a6cc9..0000000000
--- a/changelog.d/10814.feature
+++ /dev/null
@@ -1 +0,0 @@
-Improve oEmbed previews by processing the author name, photo, and video information.
diff --git a/changelog.d/10815.misc b/changelog.d/10815.misc
deleted file mode 100644
index fc2534dc14..0000000000
--- a/changelog.d/10815.misc
+++ /dev/null
@@ -1 +0,0 @@
-Specify the type of token in generic "Invalid token" error messages.
\ No newline at end of file
diff --git a/changelog.d/10816.misc b/changelog.d/10816.misc
deleted file mode 100644
index 2ca55b334a..0000000000
--- a/changelog.d/10816.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make `StateFilter` frozen so it is hashable.
diff --git a/changelog.d/10817.misc b/changelog.d/10817.misc
deleted file mode 100644
index 39a37b90b1..0000000000
--- a/changelog.d/10817.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add missing type hints to REST servlets.
diff --git a/changelog.d/10819.feature b/changelog.d/10819.feature
deleted file mode 100644
index 4fa95a6cc9..0000000000
--- a/changelog.d/10819.feature
+++ /dev/null
@@ -1 +0,0 @@
-Improve oEmbed previews by processing the author name, photo, and video information.
diff --git a/changelog.d/10820.misc b/changelog.d/10820.misc
deleted file mode 100644
index 4373bf6f6b..0000000000
--- a/changelog.d/10820.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where an `m.room.message` event containing a null byte would cause an internal server error.
\ No newline at end of file
diff --git a/changelog.d/10823.misc b/changelog.d/10823.misc
deleted file mode 100644
index 0532969900..0000000000
--- a/changelog.d/10823.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type hints to the state database.
diff --git a/changelog.d/10826.misc b/changelog.d/10826.misc
deleted file mode 100644
index 53e56fc362..0000000000
--- a/changelog.d/10826.misc
+++ /dev/null
@@ -1,2 +0,0 @@
-Opt out of cache expiry for `get_users_who_share_room_with_user`, to hopefully improve `/sync` performance when you
-haven't synced recently.
diff --git a/changelog.d/10827.bugfix b/changelog.d/10827.bugfix
deleted file mode 100644
index 11a618bf82..0000000000
--- a/changelog.d/10827.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix error in deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters.
diff --git a/changelog.d/10829.misc b/changelog.d/10829.misc
deleted file mode 100644
index ac5fd6b047..0000000000
--- a/changelog.d/10829.misc
+++ /dev/null
@@ -1 +0,0 @@
-Track cache eviction rates more finely in Prometheus' monitoring.
\ No newline at end of file
diff --git a/changelog.d/10831.misc b/changelog.d/10831.misc
deleted file mode 100644
index f09af2e00a..0000000000
--- a/changelog.d/10831.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add missing type hints to handlers.
diff --git a/changelog.d/10833.misc b/changelog.d/10833.misc
deleted file mode 100644
index f23c0a1a02..0000000000
--- a/changelog.d/10833.misc
+++ /dev/null
@@ -1 +0,0 @@
-Extend the ModuleApi to let plug-ins check whether an ID is local and to access IP + User Agent data.
diff --git a/changelog.d/10834.misc b/changelog.d/10834.misc
deleted file mode 100644
index 037695e6e9..0000000000
--- a/changelog.d/10834.misc
+++ /dev/null
@@ -1 +0,0 @@
-Factor out PNG image data to a constant to be used in several tests.
diff --git a/changelog.d/10835.misc b/changelog.d/10835.misc
deleted file mode 100644
index 0c3d13477e..0000000000
--- a/changelog.d/10835.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a test to ensure state events sent by modules get persisted correctly.
diff --git a/changelog.d/10838.misc b/changelog.d/10838.misc
deleted file mode 100644
index b1977d0a2e..0000000000
--- a/changelog.d/10838.misc
+++ /dev/null
@@ -1 +0,0 @@
-Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) fields and event types from `chunk` to `batch` to match the `/batch_send` endpoint.
diff --git a/changelog.d/10839.misc b/changelog.d/10839.misc
deleted file mode 100644
index d0e10f31d5..0000000000
--- a/changelog.d/10839.misc
+++ /dev/null
@@ -1 +0,0 @@
-Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` query parameter from `?prev_event` to more obvious usage with `?prev_event_id`.
diff --git a/changelog.d/10843.bugfix b/changelog.d/10843.bugfix
deleted file mode 100644
index 5027a1dbef..0000000000
--- a/changelog.d/10843.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug causing the `remove_stale_pushers` background job to repeatedly fail and log errors. This bug affected Synapse servers that had been upgraded from version 1.28 or older and are using SQLite.
diff --git a/changelog.d/10845.doc b/changelog.d/10845.doc
deleted file mode 100644
index a13c845ae6..0000000000
--- a/changelog.d/10845.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix some crashes in the Module API example code, by adding JSON encoding/decoding.
diff --git a/changelog.d/10856.misc b/changelog.d/10856.misc
deleted file mode 100644
index f09af2e00a..0000000000
--- a/changelog.d/10856.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add missing type hints to handlers.
diff --git a/changelog.d/10859.bugfix b/changelog.d/10859.bugfix
deleted file mode 100644
index c1bfe22d54..0000000000
--- a/changelog.d/10859.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug in Unicode support of the room search admin API. It is now possible to search for rooms with non-ASCII characters.
\ No newline at end of file
diff --git a/changelog.d/10865.doc b/changelog.d/10865.doc
deleted file mode 100644
index deeb0eedf3..0000000000
--- a/changelog.d/10865.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add developer documentation about experimental configuration flags.
diff --git a/changelog.d/10867.misc b/changelog.d/10867.misc
deleted file mode 100644
index 01e51fbc6e..0000000000
--- a/changelog.d/10867.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type hints to `synapse.http.site`.
diff --git a/changelog.d/10868.feature b/changelog.d/10868.feature
deleted file mode 100644
index 07e7b2c6a7..0000000000
--- a/changelog.d/10868.feature
+++ /dev/null
@@ -1 +0,0 @@
-Speed up responding with large JSON objects to requests.
diff --git a/changelog.d/10869.doc b/changelog.d/10869.doc
deleted file mode 100644
index c117386072..0000000000
--- a/changelog.d/10869.doc
+++ /dev/null
@@ -1 +0,0 @@
-Properly remove deleted files from GitHub pages when generating the documentation.
diff --git a/changelog.d/10873.bugfix b/changelog.d/10873.bugfix
deleted file mode 100644
index 32b2e50fd9..0000000000
--- a/changelog.d/10873.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse 1.37.0 which caused `knock` events which we sent to remote servers to be incorrectly stored in the local database.
diff --git a/changelog.d/10875.bugfix b/changelog.d/10875.bugfix
deleted file mode 100644
index 6f370da5c7..0000000000
--- a/changelog.d/10875.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix invalidating one-time key count cache after claiming keys. Contributed by Tulir at Beeper.
diff --git a/changelog.d/10879.misc b/changelog.d/10879.misc
deleted file mode 100644
index acc04930fa..0000000000
--- a/changelog.d/10879.misc
+++ /dev/null
@@ -1 +0,0 @@
-Include outlier status when we log V2 or V3 events.
diff --git a/changelog.d/10880.misc b/changelog.d/10880.misc
deleted file mode 100644
index 5f58d6198c..0000000000
--- a/changelog.d/10880.misc
+++ /dev/null
@@ -1 +0,0 @@
-Break down Grafana's cache expiry time series based on reason for eviction---see #10829.
\ No newline at end of file
diff --git a/changelog.d/10881.bugfix b/changelog.d/10881.bugfix
deleted file mode 100644
index 0a8905cc46..0000000000
--- a/changelog.d/10881.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix application service users being subject to MAU blocking if MAU had been reached, even if configured not to be blocked.
diff --git a/changelog.d/10883.misc b/changelog.d/10883.misc
deleted file mode 100644
index 9a765435db..0000000000
--- a/changelog.d/10883.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up some of the federation event authentication code for clarity.
diff --git a/changelog.d/10884.misc b/changelog.d/10884.misc
deleted file mode 100644
index 9a765435db..0000000000
--- a/changelog.d/10884.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up some of the federation event authentication code for clarity.
diff --git a/changelog.d/10885.misc b/changelog.d/10885.misc
deleted file mode 100644
index 586a0b3a96..0000000000
--- a/changelog.d/10885.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use direct references to config flags.
diff --git a/changelog.d/10887.bugfix b/changelog.d/10887.bugfix
deleted file mode 100644
index 2d1f67489a..0000000000
--- a/changelog.d/10887.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231).
diff --git a/changelog.d/10889.misc b/changelog.d/10889.misc
deleted file mode 100644
index 6d60188f55..0000000000
--- a/changelog.d/10889.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up some unnecessary parentheses in places around the codebase.
\ No newline at end of file
diff --git a/changelog.d/10891.misc b/changelog.d/10891.misc
deleted file mode 100644
index 6eecea4065..0000000000
--- a/changelog.d/10891.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve type hinting in the user directory code.
\ No newline at end of file
diff --git a/changelog.d/10893.misc b/changelog.d/10893.misc
deleted file mode 100644
index 586a0b3a96..0000000000
--- a/changelog.d/10893.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use direct references to config flags.
diff --git a/changelog.d/10896.misc b/changelog.d/10896.misc
deleted file mode 100644
index 41de995842..0000000000
--- a/changelog.d/10896.misc
+++ /dev/null
@@ -1 +0,0 @@
- Clean up some of the federation event authentication code for clarity.
diff --git a/changelog.d/10897.misc b/changelog.d/10897.misc
deleted file mode 100644
index 586a0b3a96..0000000000
--- a/changelog.d/10897.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use direct references to config flags.
diff --git a/changelog.d/10898.feature b/changelog.d/10898.feature
deleted file mode 100644
index 97fa39fd0c..0000000000
--- a/changelog.d/10898.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a `user_may_create_room_with_invites` spam checker callback to allow modules to allow or deny a room creation request based on the invites and/or 3PID invites it includes.
diff --git a/changelog.d/10901.misc b/changelog.d/10901.misc
deleted file mode 100644
index 9a765435db..0000000000
--- a/changelog.d/10901.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up some of the federation event authentication code for clarity.
diff --git a/changelog.d/10905.feature b/changelog.d/10905.feature
deleted file mode 100644
index 07e7b2c6a7..0000000000
--- a/changelog.d/10905.feature
+++ /dev/null
@@ -1 +0,0 @@
-Speed up responding with large JSON objects to requests.
diff --git a/changelog.d/10906.misc b/changelog.d/10906.misc
deleted file mode 100644
index 20a1cbfbd0..0000000000
--- a/changelog.d/10906.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update development testing script `test_postgresql.sh` to use a supported Python version and make re-runs quicker.
\ No newline at end of file
diff --git a/changelog.d/10907.bugfix b/changelog.d/10907.bugfix
deleted file mode 100644
index 601b341f9f..0000000000
--- a/changelog.d/10907.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug which could cause events pulled over federation to be incorrectly rejected.
diff --git a/changelog.d/10911.bugfix b/changelog.d/10911.bugfix
deleted file mode 100644
index 96e36bb15a..0000000000
--- a/changelog.d/10911.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Avoid storing URL cache files in storage providers. Server admins may safely delete the `url_cache/` and `url_cache_thumbnails/` directories from any configured storage providers to reclaim space.
diff --git a/changelog.d/10913.bugfix b/changelog.d/10913.bugfix
deleted file mode 100644
index a0015c8241..0000000000
--- a/changelog.d/10913.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix race conditions when creating media store and config directories.
diff --git a/changelog.d/10917.misc b/changelog.d/10917.misc
deleted file mode 100644
index 9ce6eef94b..0000000000
--- a/changelog.d/10917.misc
+++ /dev/null
@@ -1 +0,0 @@
-Document and summarize changes in schema version `61` - `64`.
diff --git a/changelog.d/10925.misc b/changelog.d/10925.misc
deleted file mode 100644
index 0c8027ecc2..0000000000
--- a/changelog.d/10925.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update release script to sign the newly created git tags.
diff --git a/changelog.d/10931.bugfix b/changelog.d/10931.bugfix
deleted file mode 100644
index 3f30c9ccf1..0000000000
--- a/changelog.d/10931.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix debian builds due to dh-virtualenv no longer being able to build their docs.
diff --git a/debian/changelog b/debian/changelog
index 4b07d04128..191bb97c5e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.44.0~rc1) stable; urgency=medium
+
+ * New synapse release 1.44.0~rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 28 Sep 2021 13:41:28 +0100
+
matrix-synapse-py3 (1.43.0) stable; urgency=medium
* New synapse release 1.43.0.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 5f5cff1dfd..a1fec8ad2b 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -47,7 +47,7 @@ try:
except ImportError:
pass
-__version__ = "1.43.0"
+__version__ = "1.44.0rc1"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
--
From c3ccad7785cd71372673136f329d5fa098ab9f04 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 28 Sep 2021 08:44:19 -0400
Subject: Only do restricted join rules signature checks for room versions 8/9.
(#10927)
Otherwise the presence of a (bogus, unused) field could cause
auth checks to fail.
---
changelog.d/10927.bugfix | 1 +
synapse/event_auth.py | 3 ++-
2 files changed, 3 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/10927.bugfix
diff --git a/changelog.d/10927.bugfix b/changelog.d/10927.bugfix
new file mode 100644
index 0000000000..fd24288c54
--- /dev/null
+++ b/changelog.d/10927.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse v1.40.0 where the signature checks for room version 8/9 could be applied to earlier room versions in some situations.
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index fc50a0e71a..5d7c6fa858 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -113,7 +113,8 @@ def check(
raise AuthError(403, "Event not signed by sending server")
is_invite_via_allow_rule = (
- event.type == EventTypes.Member
+ room_version_obj.msc3083_join_rules
+ and event.type == EventTypes.Member
and event.membership == Membership.JOIN
and "join_authorised_via_users_server" in event.content
)
--
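The substance of the fix above, as a minimal standalone sketch: `RoomVersion` and the `msc3083_join_rules` flag are stand-ins for Synapse's real classes, and the event fields are passed as plain arguments for brevity. The point is that the `join_authorised_via_users_server` check is now gated on the room version actually supporting restricted join rules:

    from dataclasses import dataclass

    @dataclass(frozen=True)
    class RoomVersion:
        # Stand-in for Synapse's RoomVersion; True only for room
        # versions 8/9, which support restricted join rules (MSC3083).
        msc3083_join_rules: bool

    def is_invite_via_allow_rule(
        room_version: RoomVersion, event_type: str, membership: str, content: dict
    ) -> bool:
        # Without the room-version guard, a bogus (unused)
        # "join_authorised_via_users_server" key in any join event would
        # trigger the extra signature checks and could fail auth.
        return (
            room_version.msc3083_join_rules
            and event_type == "m.room.member"
            and membership == "join"
            and "join_authorised_via_users_server" in content
        )

    content = {"join_authorised_via_users_server": "@admin:example.org"}
    # A v6 room ignores the stray field; a v8/v9 room does not.
    assert not is_invite_via_allow_rule(RoomVersion(False), "m.room.member", "join", content)
    assert is_invite_via_allow_rule(RoomVersion(True), "m.room.member", "join", content)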
From bc69d49362dfa0ee2e917427c61a7b67c0d78b34 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 28 Sep 2021 13:48:42 +0100
Subject: Fixup changelog
---
CHANGES.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/CHANGES.md b/CHANGES.md
index da4d98ac2b..a7a9abf79c 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -19,8 +19,8 @@ Bugfixes
- Fix a case where logging contexts would go missing when federation requests time out. ([\#10810](https://github.com/matrix-org/synapse/issues/10810))
- Fix error in deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters. ([\#10827](https://github.com/matrix-org/synapse/issues/10827))
- Fix a bug causing the `remove_stale_pushers` background job to repeatedly fail and log errors. This bug affected Synapse servers that had been upgraded from version 1.28 or older and are using SQLite. ([\#10843](https://github.com/matrix-org/synapse/issues/10843))
-- Fix a bug in Unicode support of the room search admin API. It is now possible to search for rooms with non-ASCII characters. ([\#10859](https://github.com/matrix-org/synapse/issues/10859))
-- Fix a bug introduced in Synapse 1.37.0 which caused `knock` events which we sent to remote servers to be incorrectly stored in the local database. ([\#10873](https://github.com/matrix-org/synapse/issues/10873))
+- Fix a bug in Unicode support of the room search admin API breaking search for rooms with non-ASCII characters. ([\#10859](https://github.com/matrix-org/synapse/issues/10859))
+- Fix a bug introduced in Synapse 1.37.0 which caused `knock` membership events which we sent to remote servers to be incorrectly stored in the local database. ([\#10873](https://github.com/matrix-org/synapse/issues/10873))
- Fix invalidating one-time key count cache after claiming keys. Contributed by Tulir at Beeper. ([\#10875](https://github.com/matrix-org/synapse/issues/10875))
- Fix application service users being subject to MAU blocking if MAU had been reached, even if configured not to be blocked. ([\#10881](https://github.com/matrix-org/synapse/issues/10881))
- Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). ([\#10887](https://github.com/matrix-org/synapse/issues/10887))
--
From 2b9d174791833d8eb8ee40d98cc59d187c2eb205 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 28 Sep 2021 13:50:05 +0100
Subject: Fixup changelog
---
CHANGES.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/CHANGES.md b/CHANGES.md
index a7a9abf79c..0b209edd4c 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -52,7 +52,7 @@ Internal Changes
- Add type hints to the state database. ([\#10823](https://github.com/matrix-org/synapse/issues/10823))
- Opt out of cache expiry for `get_users_who_share_room_with_user`, to hopefully improve `/sync` performance when you
haven't synced recently. ([\#10826](https://github.com/matrix-org/synapse/issues/10826))
-- Track cache eviction rates more finely in Prometheus' monitoring. ([\#10829](https://github.com/matrix-org/synapse/issues/10829))
+- Track cache eviction rates more finely in Prometheus's monitoring. ([\#10829](https://github.com/matrix-org/synapse/issues/10829))
- Add missing type hints to handlers. ([\#10831](https://github.com/matrix-org/synapse/issues/10831), [\#10856](https://github.com/matrix-org/synapse/issues/10856))
- Extend the ModuleApi to let plug-ins check whether an ID is local and to access IP + User Agent data. ([\#10833](https://github.com/matrix-org/synapse/issues/10833))
- Factor out PNG image data to a constant to be used in several tests. ([\#10834](https://github.com/matrix-org/synapse/issues/10834))
@@ -61,7 +61,7 @@ Internal Changes
- Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` query parameter from `?prev_event` to more obvious usage with `?prev_event_id`. ([\#10839](https://github.com/matrix-org/synapse/issues/10839))
- Add type hints to `synapse.http.site`. ([\#10867](https://github.com/matrix-org/synapse/issues/10867))
- Include outlier status when we log V2 or V3 events. ([\#10879](https://github.com/matrix-org/synapse/issues/10879))
-- Break down Grafana's cache expiry time series based on reason for eviction---see #10829. ([\#10880](https://github.com/matrix-org/synapse/issues/10880))
+- Break down Grafana's cache expiry time series based on reason for eviction, c.f. #10829. ([\#10880](https://github.com/matrix-org/synapse/issues/10880))
- Clean up some of the federation event authentication code for clarity. ([\#10883](https://github.com/matrix-org/synapse/issues/10883), [\#10884](https://github.com/matrix-org/synapse/issues/10884), [\#10896](https://github.com/matrix-org/synapse/issues/10896), [\#10901](https://github.com/matrix-org/synapse/issues/10901))
- Clean up some unnecessary parentheses in places around the codebase. ([\#10889](https://github.com/matrix-org/synapse/issues/10889))
- Improve type hinting in the user directory code. ([\#10891](https://github.com/matrix-org/synapse/issues/10891))
--
From eb2c7e51c460a83b7880eefc66eb9ca6a8adab94 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 28 Sep 2021 09:24:40 -0400
Subject: Clean-up type hints in server config (#10915)
This stores configuration in attrs classes instead of dicts, and updates
some of the attrs classes to use proper type hints and auto_attribs.
---
changelog.d/10915.misc | 1 +
synapse/config/server.py | 100 ++++++++++++++++++++---------------------
synapse/handlers/pagination.py | 8 ++--
3 files changed, 54 insertions(+), 55 deletions(-)
create mode 100644 changelog.d/10915.misc
diff --git a/changelog.d/10915.misc b/changelog.d/10915.misc
new file mode 100644
index 0000000000..1ce2910ffa
--- /dev/null
+++ b/changelog.d/10915.misc
@@ -0,0 +1 @@
+Clean-up configuration helper classes for the `ServerConfig` class.
diff --git a/synapse/config/server.py b/synapse/config/server.py
index ad8715da29..041412d7ad 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -19,7 +19,7 @@ import logging
import os.path
import re
from textwrap import indent
-from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
+from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
import attr
import yaml
@@ -184,49 +184,74 @@ KNOWN_RESOURCES = {
@attr.s(frozen=True)
class HttpResourceConfig:
- names = attr.ib(
- type=List[str],
+ names: List[str] = attr.ib(
factory=list,
validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)), # type: ignore
)
- compress = attr.ib(
- type=bool,
+ compress: bool = attr.ib(
default=False,
validator=attr.validators.optional(attr.validators.instance_of(bool)), # type: ignore[arg-type]
)
-@attr.s(frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class HttpListenerConfig:
"""Object describing the http-specific parts of the config of a listener"""
- x_forwarded = attr.ib(type=bool, default=False)
- resources = attr.ib(type=List[HttpResourceConfig], factory=list)
- additional_resources = attr.ib(type=Dict[str, dict], factory=dict)
- tag = attr.ib(type=str, default=None)
+ x_forwarded: bool = False
+ resources: List[HttpResourceConfig] = attr.ib(factory=list)
+ additional_resources: Dict[str, dict] = attr.ib(factory=dict)
+ tag: Optional[str] = None
-@attr.s(frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class ListenerConfig:
"""Object describing the configuration of a single listener."""
- port = attr.ib(type=int, validator=attr.validators.instance_of(int))
- bind_addresses = attr.ib(type=List[str])
- type = attr.ib(type=str, validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
- tls = attr.ib(type=bool, default=False)
+ port: int = attr.ib(validator=attr.validators.instance_of(int))
+ bind_addresses: List[str]
+ type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
+ tls: bool = False
# http_options is only populated if type=http
- http_options = attr.ib(type=Optional[HttpListenerConfig], default=None)
+ http_options: Optional[HttpListenerConfig] = None
-@attr.s(frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class ManholeConfig:
"""Object describing the configuration of the manhole"""
- username = attr.ib(type=str, validator=attr.validators.instance_of(str))
- password = attr.ib(type=str, validator=attr.validators.instance_of(str))
- priv_key = attr.ib(type=Optional[Key])
- pub_key = attr.ib(type=Optional[Key])
+ username: str = attr.ib(validator=attr.validators.instance_of(str))
+ password: str = attr.ib(validator=attr.validators.instance_of(str))
+ priv_key: Optional[Key]
+ pub_key: Optional[Key]
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class RetentionConfig:
+ """Object describing the configuration of a retention purge job"""
+
+ interval: int
+ shortest_max_lifetime: Optional[int]
+ longest_max_lifetime: Optional[int]
+
+
+@attr.s(frozen=True)
+class LimitRemoteRoomsConfig:
+ enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
+ complexity: Union[float, int] = attr.ib(
+ validator=attr.validators.instance_of(
+ (float, int) # type: ignore[arg-type] # noqa
+ ),
+ default=1.0,
+ )
+ complexity_error: str = attr.ib(
+ validator=attr.validators.instance_of(str),
+ default=ROOM_COMPLEXITY_TOO_GREAT,
+ )
+ admins_can_join: bool = attr.ib(
+ validator=attr.validators.instance_of(bool), default=False
+ )
class ServerConfig(Config):
@@ -519,7 +544,7 @@ class ServerConfig(Config):
" greater than 'allowed_lifetime_max'"
)
- self.retention_purge_jobs: List[Dict[str, Optional[int]]] = []
+ self.retention_purge_jobs: List[RetentionConfig] = []
for purge_job_config in retention_config.get("purge_jobs", []):
interval_config = purge_job_config.get("interval")
@@ -553,20 +578,12 @@ class ServerConfig(Config):
)
self.retention_purge_jobs.append(
- {
- "interval": interval,
- "shortest_max_lifetime": shortest_max_lifetime,
- "longest_max_lifetime": longest_max_lifetime,
- }
+ RetentionConfig(interval, shortest_max_lifetime, longest_max_lifetime)
)
if not self.retention_purge_jobs:
self.retention_purge_jobs = [
- {
- "interval": self.parse_duration("1d"),
- "shortest_max_lifetime": None,
- "longest_max_lifetime": None,
- }
+ RetentionConfig(self.parse_duration("1d"), None, None)
]
self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
@@ -591,25 +608,6 @@ class ServerConfig(Config):
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None))
- @attr.s
- class LimitRemoteRoomsConfig:
- enabled = attr.ib(
- validator=attr.validators.instance_of(bool), default=False
- )
- complexity = attr.ib(
- validator=attr.validators.instance_of(
- (float, int) # type: ignore[arg-type] # noqa
- ),
- default=1.0,
- )
- complexity_error = attr.ib(
- validator=attr.validators.instance_of(str),
- default=ROOM_COMPLEXITY_TOO_GREAT,
- )
- admins_can_join = attr.ib(
- validator=attr.validators.instance_of(bool), default=False
- )
-
self.limit_remote_rooms = LimitRemoteRoomsConfig(
**(config.get("limit_remote_rooms") or {})
)
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 08b93b3ec1..a5301ece6f 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -92,16 +92,16 @@ class PaginationHandler:
if hs.config.worker.run_background_tasks and hs.config.retention_enabled:
# Run the purge jobs described in the configuration file.
- for job in hs.config.retention_purge_jobs:
+ for job in hs.config.server.retention_purge_jobs:
logger.info("Setting up purge job with config: %s", job)
self.clock.looping_call(
run_as_background_process,
- job["interval"],
+ job.interval,
"purge_history_for_rooms_in_range",
self.purge_history_for_rooms_in_range,
- job["shortest_max_lifetime"],
- job["longest_max_lifetime"],
+ job.shortest_max_lifetime,
+ job.longest_max_lifetime,
)
async def purge_history_for_rooms_in_range(
--
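A hedged sketch of the dict-to-attrs conversion above, using the third-party `attrs` package (which Synapse already depends on). `parse_purge_jobs` is an illustrative helper, not Synapse's real parsing code; the field names match the diff:

    from typing import List, Optional

    import attr

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class RetentionConfig:
        """One purge job from the retention section of the server config."""

        interval: int
        shortest_max_lifetime: Optional[int]
        longest_max_lifetime: Optional[int]

    def parse_purge_jobs(raw_jobs: List[dict]) -> List[RetentionConfig]:
        # Attribute access (job.interval) replaces the old dict lookups
        # (job["interval"]); a typo now fails loudly as an AttributeError
        # and mypy can check the field types.
        return [
            RetentionConfig(
                interval=job["interval"],
                shortest_max_lifetime=job.get("shortest_max_lifetime"),
                longest_max_lifetime=job.get("longest_max_lifetime"),
            )
            for job in raw_jobs
        ]

    jobs = parse_purge_jobs([{"interval": 24 * 60 * 60 * 1000}])
    assert jobs[0].shortest_max_lifetime is None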
From 37bb93d1818eeda0d64c02cb772c8dee5596194f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 28 Sep 2021 14:36:19 +0100
Subject: Fix exception responding to request that has been closed (#10932)
Introduced in #10905
---
changelog.d/10932.feature | 1 +
synapse/http/server.py | 14 +++++++++++---
2 files changed, 12 insertions(+), 3 deletions(-)
create mode 100644 changelog.d/10932.feature
diff --git a/changelog.d/10932.feature b/changelog.d/10932.feature
new file mode 100644
index 0000000000..07e7b2c6a7
--- /dev/null
+++ b/changelog.d/10932.feature
@@ -0,0 +1 @@
+Speed up responding with large JSON objects to requests.
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 1a50305dcf..0df1bfbeef 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -561,9 +561,17 @@ class _ByteProducer:
self._iterator = iterator
self._paused = False
- # Register the producer and start producing data.
- self._request.registerProducer(self, True)
- self.resumeProducing()
+ try:
+ self._request.registerProducer(self, True)
+ except RuntimeError as e:
+ logger.info("Connection disconnected before response was written: %r", e)
+
+ # We drop our references to data we'll not use.
+ self._request = None
+ self._iterator = iter(())
+ else:
+ # Start producing if `registerProducer` was successful
+ self.resumeProducing()
def _send_data(self, data: List[bytes]) -> None:
"""
--
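A simplified, runnable sketch of the producer-registration fix. `ClosedRequest` fakes a Twisted `Request` whose connection has already gone away (per the diff, Twisted raises `RuntimeError` from `registerProducer` in that case); the producer below only starts producing once registration succeeds:

    import logging
    from typing import Iterator

    logger = logging.getLogger(__name__)

    class ClosedRequest:
        """Fake Twisted Request whose connection has already gone away."""

        def registerProducer(self, producer: object, streaming: bool) -> None:
            raise RuntimeError("cannot register producer: connection lost")

    class ByteProducer:
        def __init__(self, request, iterator: Iterator[bytes]) -> None:
            self._request = request
            self._iterator = iterator
            try:
                request.registerProducer(self, True)
            except RuntimeError as e:
                logger.info(
                    "Connection disconnected before response was written: %r", e
                )
                # Drop references to data we will never send.
                self._request = None
                self._iterator = iter(())
            else:
                # Only start producing once registration has succeeded.
                self.resumeProducing()

        def resumeProducing(self) -> None:
            # The real producer writes chunks to the request; consuming
            # the iterator is enough for this sketch.
            for _chunk in self._iterator:
                pass

    ByteProducer(ClosedRequest(), iter([b"{}"]))  # logs instead of crashing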
From 2622b28c5cbe38c60c556544aa7502a8684ee60b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 28 Sep 2021 15:25:07 +0100
Subject: Inline `_check_event_auth` for outliers (#10926)
* Inline `_check_event_auth` for outliers
When we are persisting an outlier, most of `_check_event_auth` is redundant:
* `_update_auth_events_and_context_for_auth` does nothing, because the
`input_auth_events` are (now) exactly the event's auth_events,
which means that `missing_auth` is empty.
* we don't care about soft-fail, kicking guest users or `send_on_behalf_of`
for outliers
... so the only thing that matters is the auth itself, so let's just do that.
* `_auth_and_persist_fetched_events_inner`: de-async `prep`
`prep` no longer calls any `async` methods, so let's make it synchronous.
* Simplify `_check_event_auth`
We no longer need to support outliers here, which makes things rather simpler.
* changelog
* lint
---
changelog.d/10896.misc | 2 +-
changelog.d/10926.misc | 1 +
synapse/handlers/federation_event.py | 93 ++++++++++++++----------------------
tests/test_federation.py | 1 -
4 files changed, 38 insertions(+), 59 deletions(-)
create mode 100644 changelog.d/10926.misc
diff --git a/changelog.d/10896.misc b/changelog.d/10896.misc
index 41de995842..9a765435db 100644
--- a/changelog.d/10896.misc
+++ b/changelog.d/10896.misc
@@ -1 +1 @@
- Clean up some of the federation event authentication code for clarity.
+Clean up some of the federation event authentication code for clarity.
diff --git a/changelog.d/10926.misc b/changelog.d/10926.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/10926.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 01fd841122..2c4644b4a3 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -68,11 +68,7 @@ from synapse.types import (
UserID,
get_domain_from_id,
)
-from synapse.util.async_helpers import (
- Linearizer,
- concurrently_execute,
- yieldable_gather_results,
-)
+from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.iterutils import batch_iter
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import shortstr
@@ -1189,7 +1185,10 @@ class FederationEventHandler:
allow_rejected=True,
)
- async def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]:
+ room_version = await self._store.get_room_version_id(room_id)
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+
+ def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]:
with nested_logging_context(suffix=event.event_id):
auth = {}
for auth_event_id in event.auth_event_ids():
@@ -1207,17 +1206,15 @@ class FederationEventHandler:
auth[(ae.type, ae.state_key)] = ae
context = EventContext.for_outlier()
- context = await self._check_event_auth(
- origin,
- event,
- context,
- claimed_auth_event_map=auth,
- )
+ try:
+ event_auth.check(room_version_obj, event, auth_events=auth)
+ except AuthError as e:
+ logger.warning("Rejecting %r because %s", event, e)
+ context.rejected = RejectedReason.AUTH_ERROR
+
return event, context
- events_to_persist = (
- x for x in await yieldable_gather_results(prep, fetched_events) if x
- )
+ events_to_persist = (x for x in (prep(event) for event in fetched_events) if x)
await self.persist_events_and_notify(room_id, tuple(events_to_persist))
async def _check_event_auth(
@@ -1226,7 +1223,6 @@ class FederationEventHandler:
event: EventBase,
context: EventContext,
state: Optional[Iterable[EventBase]] = None,
- claimed_auth_event_map: Optional[StateMap[EventBase]] = None,
backfilled: bool = False,
) -> EventContext:
"""
@@ -1242,42 +1238,36 @@ class FederationEventHandler:
The state events used to check the event for soft-fail. If this is
not provided the current state events will be used.
- claimed_auth_event_map:
- A map of (type, state_key) => event for the event's claimed auth_events.
- Possibly including events that were rejected, or are in the wrong room.
-
- Only populated when populating outliers.
-
backfilled: True if the event was backfilled.
Returns:
The updated context object.
"""
- # claimed_auth_event_map should be given iff the event is an outlier
- assert bool(claimed_auth_event_map) == event.internal_metadata.outlier
+ # This method should only be used for non-outliers
+ assert not event.internal_metadata.outlier
room_version = await self._store.get_room_version_id(event.room_id)
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
- if claimed_auth_event_map:
- # if we have a copy of the auth events from the event, use that as the
- # basis for auth.
- auth_events = claimed_auth_event_map
- else:
- # otherwise, we calculate what the auth events *should* be, and use that
- prev_state_ids = await context.get_prev_state_ids()
- auth_events_ids = self._event_auth_handler.compute_auth_events(
- event, prev_state_ids, for_verification=True
- )
- auth_events_x = await self._store.get_events(auth_events_ids)
- auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()}
+ # calculate what the auth events *should* be, to use as a basis for auth.
+ prev_state_ids = await context.get_prev_state_ids()
+ auth_events_ids = self._event_auth_handler.compute_auth_events(
+ event, prev_state_ids, for_verification=True
+ )
+ auth_events_x = await self._store.get_events(auth_events_ids)
+ calculated_auth_event_map = {
+ (e.type, e.state_key): e for e in auth_events_x.values()
+ }
try:
(
context,
auth_events_for_auth,
) = await self._update_auth_events_and_context_for_auth(
- origin, event, context, auth_events
+ origin,
+ event,
+ context,
+ calculated_auth_event_map=calculated_auth_event_map,
)
except Exception:
# We don't really mind if the above fails, so lets not fail
@@ -1289,7 +1279,7 @@ class FederationEventHandler:
"Ignoring failure and continuing processing of event.",
event.event_id,
)
- auth_events_for_auth = auth_events
+ auth_events_for_auth = calculated_auth_event_map
try:
event_auth.check(room_version_obj, event, auth_events=auth_events_for_auth)
@@ -1425,7 +1415,7 @@ class FederationEventHandler:
origin: str,
event: EventBase,
context: EventContext,
- input_auth_events: StateMap[EventBase],
+ calculated_auth_event_map: StateMap[EventBase],
) -> Tuple[EventContext, StateMap[EventBase]]:
"""Helper for _check_event_auth. See there for docs.
@@ -1443,19 +1433,17 @@ class FederationEventHandler:
event:
context:
- input_auth_events:
- Map from (event_type, state_key) to event
-
- Normally, our calculated auth_events based on the state of the room
- at the event's position in the DAG, though occasionally (eg if the
- event is an outlier), may be the auth events claimed by the remote
- server.
+ calculated_auth_event_map:
+ Our calculated auth_events based on the state of the room
+ at the event's position in the DAG.
Returns:
updated context, updated auth event map
"""
- # take a copy of input_auth_events before we modify it.
- auth_events: MutableStateMap[EventBase] = dict(input_auth_events)
+ assert not event.internal_metadata.outlier
+
+ # take a copy of calculated_auth_event_map before we modify it.
+ auth_events: MutableStateMap[EventBase] = dict(calculated_auth_event_map)
event_auth_events = set(event.auth_event_ids())
@@ -1496,15 +1484,6 @@ class FederationEventHandler:
}
)
- if event.internal_metadata.is_outlier():
- # XXX: given that, for an outlier, we'll be working with the
- # event's *claimed* auth events rather than those we calculated:
- # (a) is there any point in this test, since different_auth below will
- # obviously be empty
- # (b) alternatively, why don't we do it earlier?
- logger.info("Skipping auth_event fetch for outlier")
- return context, auth_events
-
different_auth = event_auth_events.difference(
e.event_id for e in auth_events.values()
)
diff --git a/tests/test_federation.py b/tests/test_federation.py
index c51e018da1..24fc77d7a7 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -82,7 +82,6 @@ class MessageAcceptTests(unittest.HomeserverTestCase):
event,
context,
state=None,
- claimed_auth_event_map=None,
backfilled=False,
):
return context
--
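The de-asynced `prep` pattern from the diff, reduced to a runnable toy: events and contexts are stand-in strings and dicts, and the comprehension mirrors the expression that replaced `yieldable_gather_results` (a list is used here instead of a generator so we can assert on it):

    from typing import Optional, Tuple

    def prep(event: str) -> Optional[Tuple[str, dict]]:
        # Synchronous now: the outlier is authed against its *claimed*
        # auth events only. None drops the event; an auth failure marks
        # the context rejected instead of raising.
        if event == "unknown-room":
            return None
        context = {"rejected": "AUTH_ERROR"} if event.startswith("bad:") else {}
        return event, context

    fetched_events = ["$a", "bad:$b", "unknown-room", "$c"]

    # Same shape as the diff: evaluate prep() for every event, then
    # filter out the Nones, with no async gathering needed.
    events_to_persist = [x for x in (prep(e) for e in fetched_events) if x]
    assert len(events_to_persist) == 3
    assert events_to_persist[1][1] == {"rejected": "AUTH_ERROR"}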
From 8aaa4b7b5df5e851a5f3dd74cd3062c9f94f0066 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 28 Sep 2021 15:25:36 +0100
Subject: Drop backwards-compatibility support for "outlier" (#10903)
Before Synapse 1.31 (#9411), we relied on `outlier` being stored in the
`internal_metadata` column. We can now assume nobody will roll back their
deployment that far and drop the legacy support.
---
changelog.d/10903.misc | 1 +
synapse/storage/databases/main/events.py | 22 +---------------------
synapse/storage/schema/__init__.py | 6 ++----
3 files changed, 4 insertions(+), 25 deletions(-)
create mode 100644 changelog.d/10903.misc
diff --git a/changelog.d/10903.misc b/changelog.d/10903.misc
new file mode 100644
index 0000000000..2716ccb08c
--- /dev/null
+++ b/changelog.d/10903.misc
@@ -0,0 +1 @@
+Drop old functionality which maintained database compatibility with Synapse versions before 1.31.
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 584f818ff3..cc4e31ec30 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1276,13 +1276,6 @@ class PersistEventsStore:
logger.exception("")
raise
- # update the stored internal_metadata to update the "outlier" flag.
- # TODO: This is unused as of Synapse 1.31. Remove it once we are happy
- # to drop backwards-compatibility with 1.30.
- metadata_json = json_encoder.encode(event.internal_metadata.get_dict())
- sql = "UPDATE event_json SET internal_metadata = ? WHERE event_id = ?"
- txn.execute(sql, (metadata_json, event.event_id))
-
# Add an entry to the ex_outlier_stream table to replicate the
# change in outlier status to our workers.
stream_order = event.internal_metadata.stream_ordering
@@ -1327,19 +1320,6 @@ class PersistEventsStore:
d.pop("redacted_because", None)
return d
- def get_internal_metadata(event):
- im = event.internal_metadata.get_dict()
-
- # temporary hack for database compatibility with Synapse 1.30 and earlier:
- # store the `outlier` flag inside the internal_metadata json as well as in
- # the `events` table, so that if anyone rolls back to an older Synapse,
- # things keep working. This can be removed once we are happy to drop support
- # for that
- if event.internal_metadata.is_outlier():
- im["outlier"] = True
-
- return im
-
self.db_pool.simple_insert_many_txn(
txn,
table="event_json",
@@ -1348,7 +1328,7 @@ class PersistEventsStore:
"event_id": event.event_id,
"room_id": event.room_id,
"internal_metadata": json_encoder.encode(
- get_internal_metadata(event)
+ event.internal_metadata.get_dict()
),
"json": json_encoder.encode(event_dict(event)),
"format_version": event.format_version,
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 573e05a482..1aee741a8b 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -12,9 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# When updating these values, please leave a short summary of the changes below.
-
-SCHEMA_VERSION = 64
+SCHEMA_VERSION = 64 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@@ -46,7 +44,7 @@ Changes in SCHEMA_VERSION = 64:
"""
-SCHEMA_COMPAT_VERSION = 59
+SCHEMA_COMPAT_VERSION = 60 # 60: "outlier" not in internal_metadata.
"""Limit on how far the synapse codebase can be rolled back without breaking db compat
This value is stored in the database, and checked on startup. If the value in the
--
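A rough sketch of the rollback check that `SCHEMA_COMPAT_VERSION` feeds into. The real logic lives in `synapse.storage.prepare_database`; this simplification is an assumption about its shape, not a copy of it:

    SCHEMA_VERSION = 64         # the schema this code reads and writes
    SCHEMA_COMPAT_VERSION = 60  # oldest code able to read what we write;
                                # stamped into the database at startup

    def check_rollback_safe(db_compat_version: int) -> None:
        # The compat version found in the database was stamped by
        # whichever (possibly newer) Synapse last ran. If it exceeds the
        # schema this code understands, a rollback has gone too far:
        # refuse to start.
        if db_compat_version > SCHEMA_VERSION:
            raise RuntimeError(
                "Database requires schema version >= %d, but this code "
                "only supports up to %d" % (db_compat_version, SCHEMA_VERSION)
            )

    check_rollback_safe(60)  # fine: any code knowing schema >= 60 may run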
From 0f007fe009dde43a3a85aacee12cd51cd603bd1c Mon Sep 17 00:00:00 2001
From: Hillery Shay
Date: Tue, 28 Sep 2021 09:13:23 -0700
Subject: Update utility code to handle C implementations of frozendict
(#10902)
* update _handle_frozendict to work with C implementations of frozendict
* add changelog
* add clarifying comment to _handle_frozendict
---
changelog.d/10902.misc | 1 +
synapse/util/__init__.py | 8 +++++++-
2 files changed, 8 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/10902.misc
diff --git a/changelog.d/10902.misc b/changelog.d/10902.misc
new file mode 100644
index 0000000000..2cd79887f6
--- /dev/null
+++ b/changelog.d/10902.misc
@@ -0,0 +1 @@
+Update utility code to handle C implementations of frozendict.
\ No newline at end of file
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index bd234549bd..64daff59df 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -50,7 +50,13 @@ def _handle_frozendict(obj: Any) -> Dict[Any, Any]:
if type(obj) is frozendict:
# fishing the protected dict out of the object is a bit nasty,
# but we don't really want the overhead of copying the dict.
- return obj._dict
+ try:
+ return obj._dict
+ except AttributeError:
+ # When the C implementation of frozendict is used,
+ # there isn't a `_dict` attribute with a dict
+ # so we resort to making a copy of the frozendict
+ return dict(obj)
raise TypeError(
"Object of type %s is not JSON serializable" % obj.__class__.__name__
)
--
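The same fallback, runnable without the frozendict package: the `type(obj) is frozendict` guard from the diff is dropped here, and `MappingProxyType` stands in for a C frozendict since it is read-only and likewise has no `_dict` attribute:

    import json
    from types import MappingProxyType
    from typing import Any, Dict

    def _handle_frozendict(obj: Any) -> Dict[Any, Any]:
        # The pure-Python frozendict keeps its data in a `_dict`
        # attribute; the C implementation does not, so fall back to
        # making a copy.
        try:
            return obj._dict
        except AttributeError:
            return dict(obj)

    json_encoder = json.JSONEncoder(default=_handle_frozendict)

    # json falls through to `default` for any non-dict mapping, which
    # exercises the AttributeError branch here.
    frozen = MappingProxyType({"msgtype": "m.text"})
    assert json_encoder.encode(frozen) == '{"msgtype": "m.text"}'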
From 62800a8fe3b531369c09bb859e90f4b97cd98584 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 28 Sep 2021 17:32:31 +0100
Subject: Add #10932 to release
---
changelog.d/10932.feature | 1 -
1 file changed, 1 deletion(-)
delete mode 100644 changelog.d/10932.feature
diff --git a/changelog.d/10932.feature b/changelog.d/10932.feature
deleted file mode 100644
index 07e7b2c6a7..0000000000
--- a/changelog.d/10932.feature
+++ /dev/null
@@ -1 +0,0 @@
-Speed up responding with large JSON objects to requests.
--
From 9fd057b8c5a8c5748e7d8137d1485c38abd9602f Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 28 Sep 2021 21:23:16 -0500
Subject: Ensure `(room_id, next_batch_id)` is unique to avoid
cross-talk/conflicts between batches (MSC2716) (#10877)
Part of [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716)
Part of https://github.com/matrix-org/synapse/issues/10737
---
changelog.d/10877.feature | 1 +
synapse/handlers/message.py | 34 ++++++++++++++++++++++++++++
synapse/rest/client/room_batch.py | 6 +++--
synapse/storage/databases/main/room_batch.py | 6 +++--
4 files changed, 43 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/10877.feature
diff --git a/changelog.d/10877.feature b/changelog.d/10877.feature
new file mode 100644
index 0000000000..06a246c108
--- /dev/null
+++ b/changelog.d/10877.feature
@@ -0,0 +1 @@
+Ensure `(room_id, next_batch_id)` is unique across [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) insertion events in rooms to avoid cross-talk/conflicts between batches.
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index c66aefe2c4..07aadf3f3c 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -16,6 +16,7 @@
# limitations under the License.
import logging
import random
+from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple
from canonicaljson import encode_canonical_json
@@ -1461,6 +1462,39 @@ class EventCreationHandler:
if prev_state_ids:
raise AuthError(403, "Changing the room create event is forbidden")
+ if event.type == EventTypes.MSC2716_INSERTION:
+ room_version = await self.store.get_room_version_id(event.room_id)
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+
+ create_event = await self.store.get_create_event_for_room(event.room_id)
+ room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
+
+ # Only check an insertion event if the room version
+ # supports it or the event is from the room creator.
+ if room_version_obj.msc2716_historical or (
+ self.config.experimental.msc2716_enabled
+ and event.sender == room_creator
+ ):
+ next_batch_id = event.content.get(
+ EventContentFields.MSC2716_NEXT_BATCH_ID
+ )
+ conflicting_insertion_event_id = (
+ await self.store.get_insertion_event_by_batch_id(
+ event.room_id, next_batch_id
+ )
+ )
+ if conflicting_insertion_event_id is not None:
+ # The current insertion event that we're processing is invalid
+ # because an insertion event already exists in the room with the
+ # same next_batch_id. We can't allow multiple because the batch
+ # pointing will get weird, e.g. we can't determine which insertion
+ # event the batch event is pointing to.
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "Another insertion event already exists with the same next_batch_id",
+ errcode=Codes.INVALID_PARAM,
+ )
+
# Mark any `m.historical` messages as backfilled so they don't appear
# in `/sync` and have the proper decrementing `stream_ordering` as we import
backfilled = False
diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py
index bf14ec384e..1dffcc3147 100644
--- a/synapse/rest/client/room_batch.py
+++ b/synapse/rest/client/room_batch.py
@@ -306,11 +306,13 @@ class RoomBatchSendEventRestServlet(RestServlet):
# Verify the batch_id_from_query corresponds to an actual insertion event
# and have the batch connected.
corresponding_insertion_event_id = (
- await self.store.get_insertion_event_by_batch_id(batch_id_from_query)
+ await self.store.get_insertion_event_by_batch_id(
+ room_id, batch_id_from_query
+ )
)
if corresponding_insertion_event_id is None:
raise SynapseError(
- 400,
+ HTTPStatus.BAD_REQUEST,
"No insertion event corresponds to the given ?batch_id",
errcode=Codes.INVALID_PARAM,
)
diff --git a/synapse/storage/databases/main/room_batch.py b/synapse/storage/databases/main/room_batch.py
index a383388757..300a563c9e 100644
--- a/synapse/storage/databases/main/room_batch.py
+++ b/synapse/storage/databases/main/room_batch.py
@@ -18,7 +18,9 @@ from synapse.storage._base import SQLBaseStore
class RoomBatchStore(SQLBaseStore):
- async def get_insertion_event_by_batch_id(self, batch_id: str) -> Optional[str]:
+ async def get_insertion_event_by_batch_id(
+ self, room_id: str, batch_id: str
+ ) -> Optional[str]:
"""Retrieve an insertion event ID.
Args:
@@ -30,7 +32,7 @@ class RoomBatchStore(SQLBaseStore):
"""
return await self.db_pool.simple_select_one_onecol(
table="insertion_events",
- keyvalues={"next_batch_id": batch_id},
+ keyvalues={"room_id": room_id, "next_batch_id": batch_id},
retcol="event_id",
allow_none=True,
)
--
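A toy model of the uniqueness rule: a dict keyed on `(room_id, next_batch_id)` stands in for the `insertion_events` table, and `check_insertion_event` is an illustrative helper, not Synapse's API:

    from http import HTTPStatus
    from typing import Dict, Tuple

    # Stand-in for the insertion_events table, keyed exactly like the
    # new lookup: (room_id, next_batch_id) -> insertion event ID.
    insertion_events: Dict[Tuple[str, str], str] = {}

    def check_insertion_event(room_id: str, next_batch_id: str, event_id: str) -> None:
        # Scoping uniqueness to the room means two rooms may reuse a
        # batch ID, but within one room a duplicate is rejected, since
        # otherwise a batch event could not tell which insertion event
        # it points to.
        conflicting = insertion_events.get((room_id, next_batch_id))
        if conflicting is not None:
            raise ValueError(
                f"{int(HTTPStatus.BAD_REQUEST)}: insertion event {conflicting} "
                f"already uses next_batch_id {next_batch_id!r} in this room"
            )
        insertion_events[(room_id, next_batch_id)] = event_id

    check_insertion_event("!a:example.org", "batch1", "$ins1")
    check_insertion_event("!b:example.org", "batch1", "$ins2")  # other room: fine
    try:
        check_insertion_event("!a:example.org", "batch1", "$ins3")
    except ValueError as e:
        print(e)  # same room and batch ID: conflict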
From 2be0fde3d65c2dec7fb088de20736b9e81ada948 Mon Sep 17 00:00:00 2001
From: Sean Quah <8349537+squahtx@users.noreply.github.com>
Date: Wed, 29 Sep 2021 10:24:37 +0100
Subject: Fix empty `url_cache_thumbnails/yyyy-mm-dd/` directories being left
behind (#10924)
---
changelog.d/10924.bugfix | 1 +
synapse/rest/media/v1/preview_url_resource.py | 74 ++++++++++++++++-----------
tests/rest/media/v1/test_url_preview.py | 31 +++++++++++
3 files changed, 75 insertions(+), 31 deletions(-)
create mode 100644 changelog.d/10924.bugfix
diff --git a/changelog.d/10924.bugfix b/changelog.d/10924.bugfix
new file mode 100644
index 0000000000..c73a51e32f
--- /dev/null
+++ b/changelog.d/10924.bugfix
@@ -0,0 +1 @@
+Fix a bug where empty `yyyy-mm-dd/` directories would be left behind in the media store's `url_cache_thumbnails/` directory.
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 79a42b2455..044f44a397 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -73,6 +73,7 @@ OG_TAG_VALUE_MAXLEN = 1000
ONE_HOUR = 60 * 60 * 1000
ONE_DAY = 24 * ONE_HOUR
+IMAGE_CACHE_EXPIRY_MS = 2 * ONE_DAY
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -496,6 +497,27 @@ class PreviewUrlResource(DirectServeJsonResource):
logger.info("Still running DB updates; skipping expiry")
return
+ def try_remove_parent_dirs(dirs: Iterable[str]) -> None:
+ """Attempt to remove the given chain of parent directories
+
+ Args:
+ dirs: The list of directory paths to delete, with children appearing
+ before their parents.
+ """
+ for dir in dirs:
+ try:
+ os.rmdir(dir)
+ except FileNotFoundError:
+ # Already deleted, continue with deleting the rest
+ pass
+ except OSError as e:
+ # Failed, skip deleting the rest of the parent dirs
+ if e.errno != errno.ENOTEMPTY:
+ logger.warning(
+ "Failed to remove media directory: %r: %s", dir, e
+ )
+ break
+
# First we delete expired url cache entries
media_ids = await self.store.get_expired_url_cache(now)
@@ -504,20 +526,16 @@ class PreviewUrlResource(DirectServeJsonResource):
fname = self.filepaths.url_cache_filepath(media_id)
try:
os.remove(fname)
+ except FileNotFoundError:
+ pass # If the path doesn't exist, meh
except OSError as e:
- # If the path doesn't exist, meh
- if e.errno != errno.ENOENT:
- logger.warning("Failed to remove media: %r: %s", media_id, e)
- continue
+ logger.warning("Failed to remove media: %r: %s", media_id, e)
+ continue
removed_media.append(media_id)
- try:
- dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
- for dir in dirs:
- os.rmdir(dir)
- except Exception:
- pass
+ dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
+ try_remove_parent_dirs(dirs)
await self.store.delete_url_cache(removed_media)
@@ -530,7 +548,7 @@ class PreviewUrlResource(DirectServeJsonResource):
# These may be cached for a bit on the client (i.e., they
# may have a room open with a preview url thing open).
# So we wait a couple of days before deleting, just in case.
- expire_before = now - 2 * ONE_DAY
+ expire_before = now - IMAGE_CACHE_EXPIRY_MS
media_ids = await self.store.get_url_cache_media_before(expire_before)
removed_media = []
@@ -538,36 +556,30 @@ class PreviewUrlResource(DirectServeJsonResource):
fname = self.filepaths.url_cache_filepath(media_id)
try:
os.remove(fname)
+ except FileNotFoundError:
+ pass # If the path doesn't exist, meh
except OSError as e:
- # If the path doesn't exist, meh
- if e.errno != errno.ENOENT:
- logger.warning("Failed to remove media: %r: %s", media_id, e)
- continue
+ logger.warning("Failed to remove media: %r: %s", media_id, e)
+ continue
- try:
- dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
- for dir in dirs:
- os.rmdir(dir)
- except Exception:
- pass
+ dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
+ try_remove_parent_dirs(dirs)
thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id)
try:
shutil.rmtree(thumbnail_dir)
+ except FileNotFoundError:
+ pass # If the path doesn't exist, meh
except OSError as e:
- # If the path doesn't exist, meh
- if e.errno != errno.ENOENT:
- logger.warning("Failed to remove media: %r: %s", media_id, e)
- continue
+ logger.warning("Failed to remove media: %r: %s", media_id, e)
+ continue
removed_media.append(media_id)
- try:
- dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id)
- for dir in dirs:
- os.rmdir(dir)
- except Exception:
- pass
+ dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id)
+ # Note that one of the directories to be deleted has already been
+ # removed by the `rmtree` above.
+ try_remove_parent_dirs(dirs)
await self.store.delete_url_cache_media(removed_media)
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index 4d09b5d07e..ce43de780b 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -21,11 +21,13 @@ from twisted.internet.error import DNSLookupError
from twisted.test.proto_helpers import AccumulatingProtocol
from synapse.config.oembed import OEmbedEndpointConfig
+from synapse.rest.media.v1.preview_url_resource import IMAGE_CACHE_EXPIRY_MS
from synapse.util.stringutils import parse_and_validate_mxc_uri
from tests import unittest
from tests.server import FakeTransport
from tests.test_utils import SMALL_PNG
+from tests.utils import MockClock
try:
import lxml
@@ -851,3 +853,32 @@ class URLPreviewTests(unittest.HomeserverTestCase):
404,
"URL cache thumbnail was unexpectedly retrieved from a storage provider",
)
+
+ def test_cache_expiry(self):
+ """Test that URL cache files and thumbnails are cleaned up properly on expiry."""
+ self.preview_url.clock = MockClock()
+
+ _host, media_id = self._download_image()
+
+ file_path = self.preview_url.filepaths.url_cache_filepath(media_id)
+ file_dirs = self.preview_url.filepaths.url_cache_filepath_dirs_to_delete(
+ media_id
+ )
+ thumbnail_dir = self.preview_url.filepaths.url_cache_thumbnail_directory(
+ media_id
+ )
+ thumbnail_dirs = self.preview_url.filepaths.url_cache_thumbnail_dirs_to_delete(
+ media_id
+ )
+
+ self.assertTrue(os.path.isfile(file_path))
+ self.assertTrue(os.path.isdir(thumbnail_dir))
+
+ self.preview_url.clock.advance_time_msec(IMAGE_CACHE_EXPIRY_MS + 1)
+ self.get_success(self.preview_url._expire_url_cache_data())
+
+ for path in [file_path] + file_dirs + [thumbnail_dir] + thumbnail_dirs:
+ self.assertFalse(
+ os.path.exists(path),
+ f"{os.path.relpath(path, self.media_store_path)} was not deleted",
+ )
--
cgit 1.5.1
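For reference, the cleanup helper this patch introduces reads well on its own. A minimal standalone sketch, with hypothetical paths standing in for the values `url_cache_thumbnail_dirs_to_delete` is expected to return (deepest directory first):

import errno
import logging
import os
from typing import Iterable

logger = logging.getLogger(__name__)

def try_remove_parent_dirs(dirs: Iterable[str]) -> None:
    """Attempt to remove the given chain of parent directories.

    Args:
        dirs: The list of directory paths to delete, with children
            appearing before their parents.
    """
    for dir in dirs:
        try:
            os.rmdir(dir)
        except FileNotFoundError:
            # Already deleted (e.g. by an earlier rmtree); try the parent.
            pass
        except OSError as e:
            # Stop on any other failure; only log if the reason is
            # something other than the directory being non-empty.
            if e.errno != errno.ENOTEMPTY:
                logger.warning("Failed to remove media directory: %r: %s", dir, e)
            break

# Hypothetical call, deepest directory first:
try_remove_parent_dirs([
    "media_store/url_cache_thumbnails/2021-09-29/abcdefghijklmn",
    "media_store/url_cache_thumbnails/2021-09-29",
])

The key property is the early `break`: once a directory fails to delete because it still has contents, none of its ancestors can be empty either, so there is no point attempting them.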
From 5279b9161b323cccdb74dcdf1a68fa7e19f091d4 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 29 Sep 2021 10:57:10 +0100
Subject: Use `RoomVersion` objects (#10934)
Various refactors to use `RoomVersion` objects instead of room version identifier strings.
---
changelog.d/10934.misc | 1 +
synapse/events/builder.py | 20 ------------------
synapse/handlers/federation.py | 46 ++++++++++++++++++++++++------------------
synapse/handlers/message.py | 27 +++++++++++++++++++------
synapse/handlers/room.py | 4 ++--
5 files changed, 50 insertions(+), 48 deletions(-)
create mode 100644 changelog.d/10934.misc
diff --git a/changelog.d/10934.misc b/changelog.d/10934.misc
new file mode 100644
index 0000000000..56c640ec9e
--- /dev/null
+++ b/changelog.d/10934.misc
@@ -0,0 +1 @@
+Refactor various parts of the codebase to use `RoomVersion` objects instead of room version identifier strings.
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index 87e2bb123b..50f2a4c1f4 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -18,10 +18,8 @@ import attr
from nacl.signing import SigningKey
from synapse.api.constants import MAX_DEPTH
-from synapse.api.errors import UnsupportedRoomVersionError
from synapse.api.room_versions import (
KNOWN_EVENT_FORMAT_VERSIONS,
- KNOWN_ROOM_VERSIONS,
EventFormatVersions,
RoomVersion,
)
@@ -197,24 +195,6 @@ class EventBuilderFactory:
self.state = hs.get_state_handler()
self._event_auth_handler = hs.get_event_auth_handler()
- def new(self, room_version: str, key_values: dict) -> EventBuilder:
- """Generate an event builder appropriate for the given room version
-
- Deprecated: use for_room_version with a RoomVersion object instead
-
- Args:
- room_version: Version of the room that we're creating an event builder for
- key_values: Fields used as the basis of the new event
-
- Returns:
- EventBuilder
- """
- v = KNOWN_ROOM_VERSIONS.get(room_version)
- if not v:
- # this can happen if support is withdrawn for a room version
- raise UnsupportedRoomVersionError()
- return self.for_room_version(v, key_values)
-
def for_room_version(
self, room_version: RoomVersion, key_values: dict
) -> EventBuilder:
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index b17ef2a9a1..16c435ee86 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -718,8 +718,8 @@ class FederationHandler(BaseHandler):
state_ids,
)
- builder = self.event_builder_factory.new(
- room_version.identifier,
+ builder = self.event_builder_factory.for_room_version(
+ room_version,
{
"type": EventTypes.Member,
"content": event_content,
@@ -897,9 +897,9 @@ class FederationHandler(BaseHandler):
)
raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
- room_version = await self.store.get_room_version_id(room_id)
- builder = self.event_builder_factory.new(
- room_version,
+ room_version_obj = await self.store.get_room_version(room_id)
+ builder = self.event_builder_factory.for_room_version(
+ room_version_obj,
{
"type": EventTypes.Member,
"content": {"membership": Membership.LEAVE},
@@ -917,7 +917,7 @@ class FederationHandler(BaseHandler):
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_leave_request`
await self._event_auth_handler.check_from_context(
- room_version, event, context, do_sig_check=False
+ room_version_obj.identifier, event, context, do_sig_check=False
)
except AuthError as e:
logger.warning("Failed to create new leave %r because %s", event, e)
@@ -949,10 +949,10 @@ class FederationHandler(BaseHandler):
)
raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
- room_version = await self.store.get_room_version_id(room_id)
+ room_version_obj = await self.store.get_room_version(room_id)
- builder = self.event_builder_factory.new(
- room_version,
+ builder = self.event_builder_factory.for_room_version(
+ room_version_obj,
{
"type": EventTypes.Member,
"content": {"membership": Membership.KNOCK},
@@ -979,7 +979,7 @@ class FederationHandler(BaseHandler):
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_knock_request`
await self._event_auth_handler.check_from_context(
- room_version, event, context, do_sig_check=False
+ room_version_obj.identifier, event, context, do_sig_check=False
)
except AuthError as e:
logger.warning("Failed to create new knock %r because %s", event, e)
@@ -1245,8 +1245,10 @@ class FederationHandler(BaseHandler):
}
if await self._event_auth_handler.check_host_in_room(room_id, self.hs.hostname):
- room_version = await self.store.get_room_version_id(room_id)
- builder = self.event_builder_factory.new(room_version, event_dict)
+ room_version_obj = await self.store.get_room_version(room_id)
+ builder = self.event_builder_factory.for_room_version(
+ room_version_obj, event_dict
+ )
EventValidator().validate_builder(builder)
event, context = await self.event_creation_handler.create_new_client_event(
@@ -1254,7 +1256,7 @@ class FederationHandler(BaseHandler):
)
event, context = await self.add_display_name_to_third_party_invite(
- room_version, event_dict, event, context
+ room_version_obj, event_dict, event, context
)
EventValidator().validate_new(event, self.config)
@@ -1265,7 +1267,7 @@ class FederationHandler(BaseHandler):
try:
await self._event_auth_handler.check_from_context(
- room_version, event, context
+ room_version_obj.identifier, event, context
)
except AuthError as e:
logger.warning("Denying new third party invite %r because %s", event, e)
@@ -1299,22 +1301,24 @@ class FederationHandler(BaseHandler):
"""
assert_params_in_dict(event_dict, ["room_id"])
- room_version = await self.store.get_room_version_id(event_dict["room_id"])
+ room_version_obj = await self.store.get_room_version(event_dict["room_id"])
# NB: event_dict has a particular specced format we might need to fudge
# if we change event formats too much.
- builder = self.event_builder_factory.new(room_version, event_dict)
+ builder = self.event_builder_factory.for_room_version(
+ room_version_obj, event_dict
+ )
event, context = await self.event_creation_handler.create_new_client_event(
builder=builder
)
event, context = await self.add_display_name_to_third_party_invite(
- room_version, event_dict, event, context
+ room_version_obj, event_dict, event, context
)
try:
await self._event_auth_handler.check_from_context(
- room_version, event, context
+ room_version_obj.identifier, event, context
)
except AuthError as e:
logger.warning("Denying third party invite %r because %s", event, e)
@@ -1331,7 +1335,7 @@ class FederationHandler(BaseHandler):
async def add_display_name_to_third_party_invite(
self,
- room_version: str,
+ room_version_obj: RoomVersion,
event_dict: JsonDict,
event: EventBase,
context: EventContext,
@@ -1363,7 +1367,9 @@ class FederationHandler(BaseHandler):
# auth checks. If we need the invite and don't have it then the
# auth check code will explode appropriately.
- builder = self.event_builder_factory.new(room_version, event_dict)
+ builder = self.event_builder_factory.for_room_version(
+ room_version_obj, event_dict
+ )
EventValidator().validate_builder(builder)
event, context = await self.event_creation_handler.create_new_client_event(
builder=builder
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 07aadf3f3c..39c18ecf99 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -40,6 +40,7 @@ from synapse.api.errors import (
NotFoundError,
ShadowBanError,
SynapseError,
+ UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.api.urls import ConsentURIBuilder
@@ -550,16 +551,22 @@ class EventCreationHandler:
await self.auth.check_auth_blocking(requester=requester)
if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
- room_version = event_dict["content"]["room_version"]
+ room_version_id = event_dict["content"]["room_version"]
+ room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
+ if not room_version_obj:
+ # this can happen if support is withdrawn for a room version
+ raise UnsupportedRoomVersionError(room_version_id)
else:
try:
- room_version = await self.store.get_room_version_id(
+ room_version_obj = await self.store.get_room_version(
event_dict["room_id"]
)
except NotFoundError:
raise AuthError(403, "Unknown room")
- builder = self.event_builder_factory.new(room_version, event_dict)
+ builder = self.event_builder_factory.for_room_version(
+ room_version_obj, event_dict
+ )
self.validator.validate_builder(builder)
@@ -1070,9 +1077,17 @@ class EventCreationHandler:
EventTypes.Create,
"",
):
- room_version = event.content.get("room_version", RoomVersions.V1.identifier)
+ room_version_id = event.content.get(
+ "room_version", RoomVersions.V1.identifier
+ )
+ room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
+ if not room_version_obj:
+ raise UnsupportedRoomVersionError(
+ "Attempt to create a room with unsupported room version %s"
+ % (room_version_id,)
+ )
else:
- room_version = await self.store.get_room_version_id(event.room_id)
+ room_version_obj = await self.store.get_room_version(event.room_id)
if event.internal_metadata.is_out_of_band_membership():
# the only sort of out-of-band-membership events we expect to see here are
@@ -1082,7 +1097,7 @@ class EventCreationHandler:
else:
try:
await self._event_auth_handler.check_from_context(
- room_version, event, context
+ room_version_obj.identifier, event, context
)
except AuthError as err:
logger.warning("Denying new event %r because %s", event, err)
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 8fede5e935..dc4fab2223 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -237,9 +237,9 @@ class RoomCreationHandler(BaseHandler):
},
},
)
- old_room_version = await self.store.get_room_version_id(old_room_id)
+ old_room_version = await self.store.get_room_version(old_room_id)
await self._event_auth_handler.check_from_context(
- old_room_version, tombstone_event, tombstone_context
+ old_room_version.identifier, tombstone_event, tombstone_context
)
await self.clone_existing_room(
--
cgit 1.5.1
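The shape of the refactor above is the same at every call site: resolve the room-version identifier string to a `RoomVersion` object once, then pass the object around, falling back to `.identifier` only where a string is still expected (as with `check_from_context` here). A minimal sketch of that lookup, mirroring the handling now in `EventCreationHandler` (the function name is illustrative):

from synapse.api.errors import UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion

def resolve_room_version(room_version_id: str) -> RoomVersion:
    """Map a room version identifier (e.g. "9") to its RoomVersion object."""
    room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
    if not room_version_obj:
        # This can happen if support is withdrawn for a room version.
        raise UnsupportedRoomVersionError(room_version_id)
    return room_version_obj

# Call sites then build events from the object rather than the string, e.g.
#   builder = event_builder_factory.for_room_version(room_version_obj, key_values)
# and use room_version_obj.identifier only where a string is still required.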
From 67815cc3db971f3fd191e6e161e88037dee387d3 Mon Sep 17 00:00:00 2001
From: "Olivier Wilkinson (reivilibre)"
Date: Wed, 29 Sep 2021 11:00:56 +0100
Subject: Tweak changelog
---
CHANGES.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/CHANGES.md b/CHANGES.md
index 0b209edd4c..a8163802c2 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -5,7 +5,7 @@ Features
--------
- Only allow the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send?chunk_id=xxx` endpoint to connect to an already existing insertion event. ([\#10776](https://github.com/matrix-org/synapse/issues/10776))
-- Improve oEmbed previews by processing the author name, photo, and video information. ([\#10814](https://github.com/matrix-org/synapse/issues/10814), [\#10819](https://github.com/matrix-org/synapse/issues/10819))
+- Improve oEmbed URL previews by processing the author name, photo, and video information. ([\#10814](https://github.com/matrix-org/synapse/issues/10814), [\#10819](https://github.com/matrix-org/synapse/issues/10819))
- Speed up responding with large JSON objects to requests. ([\#10868](https://github.com/matrix-org/synapse/issues/10868), [\#10905](https://github.com/matrix-org/synapse/issues/10905))
- Add a `user_may_create_room_with_invites` spam checker callback to allow modules to allow or deny a room creation request based on the invites and/or 3PID invites it includes. ([\#10898](https://github.com/matrix-org/synapse/issues/10898))
@@ -54,14 +54,14 @@ Internal Changes
haven't synced recently. ([\#10826](https://github.com/matrix-org/synapse/issues/10826))
- Track cache eviction rates more finely in Prometheus's monitoring. ([\#10829](https://github.com/matrix-org/synapse/issues/10829))
- Add missing type hints to handlers. ([\#10831](https://github.com/matrix-org/synapse/issues/10831), [\#10856](https://github.com/matrix-org/synapse/issues/10856))
-- Extend the ModuleApi to let plug-ins check whether an ID is local and to access IP + User Agent data. ([\#10833](https://github.com/matrix-org/synapse/issues/10833))
+- Extend the Module API to let plug-ins check whether an ID is local and to access IP + User Agent data. ([\#10833](https://github.com/matrix-org/synapse/issues/10833))
- Factor out PNG image data to a constant to be used in several tests. ([\#10834](https://github.com/matrix-org/synapse/issues/10834))
- Add a test to ensure state events sent by modules get persisted correctly. ([\#10835](https://github.com/matrix-org/synapse/issues/10835))
- Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) fields and event types from `chunk` to `batch` to match the `/batch_send` endpoint. ([\#10838](https://github.com/matrix-org/synapse/issues/10838))
- Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` query parameter from `?prev_event` to more obvious usage with `?prev_event_id`. ([\#10839](https://github.com/matrix-org/synapse/issues/10839))
- Add type hints to `synapse.http.site`. ([\#10867](https://github.com/matrix-org/synapse/issues/10867))
- Include outlier status when we log V2 or V3 events. ([\#10879](https://github.com/matrix-org/synapse/issues/10879))
-- Break down Grafana's cache expiry time series based on reason for eviction, c.f. #10829. ([\#10880](https://github.com/matrix-org/synapse/issues/10880))
+- Break down Grafana's cache expiry time series based on reason for eviction, c.f. [\#10829](https://github.com/matrix-org/synapse/issues/10829). ([\#10880](https://github.com/matrix-org/synapse/issues/10880))
- Clean up some of the federation event authentication code for clarity. ([\#10883](https://github.com/matrix-org/synapse/issues/10883), [\#10884](https://github.com/matrix-org/synapse/issues/10884), [\#10896](https://github.com/matrix-org/synapse/issues/10896), [\#10901](https://github.com/matrix-org/synapse/issues/10901))
- Clean up some unnecessary parentheses in places around the codebase. ([\#10889](https://github.com/matrix-org/synapse/issues/10889))
- Improve type hinting in the user directory code. ([\#10891](https://github.com/matrix-org/synapse/issues/10891))
--
cgit 1.5.1
From 1b9ce5e8a6ed37484665b595e3ed01a8e26f9dd7 Mon Sep 17 00:00:00 2001
From: "Olivier Wilkinson (reivilibre)"
Date: Wed, 29 Sep 2021 11:09:00 +0100
Subject: Indicate when bugs were introduced and tidy up the changelog
---
CHANGES.md | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/CHANGES.md b/CHANGES.md
index a8163802c2..e27b4aa942 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -15,19 +15,17 @@ Bugfixes
- Fix a long-standing bug that caused an `AssertionError` when purging history in certain rooms. Contributed by @Kokokokoka. ([\#10690](https://github.com/matrix-org/synapse/issues/10690))
- Fix a long-standing bug which caused deactivated users that were later reactivated to be missing from the user directory. ([\#10782](https://github.com/matrix-org/synapse/issues/10782))
-- Allow sending a membership event to unban a user. Contributed by @aaronraimist. ([\#10807](https://github.com/matrix-org/synapse/issues/10807))
-- Fix a case where logging contexts would go missing when federation requests time out. ([\#10810](https://github.com/matrix-org/synapse/issues/10810))
-- Fix error in deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters. ([\#10827](https://github.com/matrix-org/synapse/issues/10827))
+- Fix a long-standing bug that caused unbanning a user by sending a membership event to fail. Contributed by @aaronraimist. ([\#10807](https://github.com/matrix-org/synapse/issues/10807))
+- Fix a long-standing bug where logging contexts would go missing when federation requests time out. ([\#10810](https://github.com/matrix-org/synapse/issues/10810))
+- Fix a long-standing bug causing an error in the deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters. ([\#10827](https://github.com/matrix-org/synapse/issues/10827))
- Fix a bug causing the `remove_stale_pushers` background job to repeatedly fail and log errors. This bug affected Synapse servers that had been upgraded from version 1.28 or older and are using SQLite. ([\#10843](https://github.com/matrix-org/synapse/issues/10843))
-- Fix a bug in Unicode support of the room search admin API breaking search for rooms with non-ASCII characters. ([\#10859](https://github.com/matrix-org/synapse/issues/10859))
+- Fix a long-standing bug in Unicode support of the room search admin API breaking search for rooms with non-ASCII characters. ([\#10859](https://github.com/matrix-org/synapse/issues/10859))
- Fix a bug introduced in Synapse 1.37.0 which caused `knock` membership events which we sent to remote servers to be incorrectly stored in the local database. ([\#10873](https://github.com/matrix-org/synapse/issues/10873))
-- Fix invalidating one-time key count cache after claiming keys. Contributed by Tulir at Beeper. ([\#10875](https://github.com/matrix-org/synapse/issues/10875))
-- Fix application service users being subject to MAU blocking if MAU had been reached, even if configured not to be blocked. ([\#10881](https://github.com/matrix-org/synapse/issues/10881))
-- Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). ([\#10887](https://github.com/matrix-org/synapse/issues/10887))
+- Fix invalidating one-time key count cache after claiming keys. The bug was introduced in Synapse v1.41.0. Contributed by Tulir at Beeper. ([\#10875](https://github.com/matrix-org/synapse/issues/10875))
+- Fix a long-standing bug causing application service users to be subject to MAU blocking if the MAU limit had been reached, even if configured not to be blocked. ([\#10881](https://github.com/matrix-org/synapse/issues/10881))
- Fix a long-standing bug which could cause events pulled over federation to be incorrectly rejected. ([\#10907](https://github.com/matrix-org/synapse/issues/10907))
-- Avoid storing URL cache files in storage providers. Server admins may safely delete the `url_cache/` and `url_cache_thumbnails/` directories from any configured storage providers to reclaim space. ([\#10911](https://github.com/matrix-org/synapse/issues/10911))
-- Fix race conditions when creating media store and config directories. ([\#10913](https://github.com/matrix-org/synapse/issues/10913))
-- Fix debian builds due to dh-virtualenv no longer being able to build their docs. ([\#10931](https://github.com/matrix-org/synapse/issues/10931))
+- Fix a long-standing bug causing URL cache files to be stored in storage providers. Server admins may safely delete the `url_cache/` and `url_cache_thumbnails/` directories from any configured storage providers to reclaim space. ([\#10911](https://github.com/matrix-org/synapse/issues/10911))
+- Fix a long-standing bug leading to race conditions when creating media store and config directories. ([\#10913](https://github.com/matrix-org/synapse/issues/10913))
Improved Documentation
@@ -53,7 +51,7 @@ Internal Changes
- Opt out of cache expiry for `get_users_who_share_room_with_user`, to hopefully improve `/sync` performance when you
haven't synced recently. ([\#10826](https://github.com/matrix-org/synapse/issues/10826))
- Track cache eviction rates more finely in Prometheus's monitoring. ([\#10829](https://github.com/matrix-org/synapse/issues/10829))
-- Add missing type hints to handlers. ([\#10831](https://github.com/matrix-org/synapse/issues/10831), [\#10856](https://github.com/matrix-org/synapse/issues/10856))
+- Add missing type hints to `synapse.handlers`. ([\#10831](https://github.com/matrix-org/synapse/issues/10831), [\#10856](https://github.com/matrix-org/synapse/issues/10856))
- Extend the Module API to let plug-ins check whether an ID is local and to access IP + User Agent data. ([\#10833](https://github.com/matrix-org/synapse/issues/10833))
- Factor out PNG image data to a constant to be used in several tests. ([\#10834](https://github.com/matrix-org/synapse/issues/10834))
- Add a test to ensure state events sent by modules get persisted correctly. ([\#10835](https://github.com/matrix-org/synapse/issues/10835))
@@ -63,11 +61,13 @@ Internal Changes
- Include outlier status when we log V2 or V3 events. ([\#10879](https://github.com/matrix-org/synapse/issues/10879))
- Break down Grafana's cache expiry time series based on reason for eviction, c.f. [\#10829](https://github.com/matrix-org/synapse/issues/10829). ([\#10880](https://github.com/matrix-org/synapse/issues/10880))
- Clean up some of the federation event authentication code for clarity. ([\#10883](https://github.com/matrix-org/synapse/issues/10883), [\#10884](https://github.com/matrix-org/synapse/issues/10884), [\#10896](https://github.com/matrix-org/synapse/issues/10896), [\#10901](https://github.com/matrix-org/synapse/issues/10901))
+- Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). ([\#10887](https://github.com/matrix-org/synapse/issues/10887))
- Clean up some unnecessary parentheses in places around the codebase. ([\#10889](https://github.com/matrix-org/synapse/issues/10889))
- Improve type hinting in the user directory code. ([\#10891](https://github.com/matrix-org/synapse/issues/10891))
- Update development testing script `test_postgresql.sh` to use a supported Python version and make re-runs quicker. ([\#10906](https://github.com/matrix-org/synapse/issues/10906))
-- Document and summarize changes in schema version `61` - `64`. ([\#10917](https://github.com/matrix-org/synapse/issues/10917))
+- Document and summarize changes in schema version `61` – `64`. ([\#10917](https://github.com/matrix-org/synapse/issues/10917))
- Update release script to sign the newly created git tags. ([\#10925](https://github.com/matrix-org/synapse/issues/10925))
+- Fix Debian builds due to `dh-virtualenv` no longer being able to build their docs. ([\#10931](https://github.com/matrix-org/synapse/issues/10931))
Synapse 1.43.0 (2021-09-21)
--
cgit 1.5.1
From 13032b6603d91d9960592fe2506bb5dcb4ae1ad8 Mon Sep 17 00:00:00 2001
From: "Olivier Wilkinson (reivilibre)"
Date: Wed, 29 Sep 2021 11:13:03 +0100
Subject: Bump the date because the release ran over
---
CHANGES.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index e27b4aa942..271e2271fb 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,4 +1,4 @@
-Synapse 1.44.0rc1 (2021-09-28)
+Synapse 1.44.0rc1 (2021-09-29)
==============================
Features
--
cgit 1.5.1
From 8cef1ab2ac8d1602ea6a087384059d104097140f Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Wed, 29 Sep 2021 04:32:45 -0600
Subject: Implement MSC3069: Guest support on whoami (#9655)
---
changelog.d/9655.feature | 1 +
synapse/rest/client/account.py | 8 +++++--
tests/rest/client/test_account.py | 49 +++++++++++++++++++++++++++++++++++----
3 files changed, 51 insertions(+), 7 deletions(-)
create mode 100644 changelog.d/9655.feature
diff --git a/changelog.d/9655.feature b/changelog.d/9655.feature
new file mode 100644
index 0000000000..70cac230d8
--- /dev/null
+++ b/changelog.d/9655.feature
@@ -0,0 +1 @@
+Add [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069) support to `/account/whoami`.
\ No newline at end of file
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index 6a7608d60b..bacb828330 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -878,9 +878,13 @@ class WhoamiRestServlet(RestServlet):
self.auth = hs.get_auth()
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request)
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
- response = {"user_id": requester.user.to_string()}
+ response = {
+ "user_id": requester.user.to_string(),
+ # MSC: https://github.com/matrix-org/matrix-doc/pull/3069
+ "org.matrix.msc3069.is_guest": bool(requester.is_guest),
+ }
# Appservices and similar accounts do not have device IDs
# that we can report on, so exclude them for compliance.
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index 9e9e953cf4..64b0b8458b 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -470,13 +470,45 @@ class WhoamiTestCase(unittest.HomeserverTestCase):
register.register_servlets,
]
+ def default_config(self):
+ config = super().default_config()
+ config["allow_guest_access"] = True
+ return config
+
def test_GET_whoami(self):
device_id = "wouldgohere"
user_id = self.register_user("kermit", "test")
tok = self.login("kermit", "test", device_id=device_id)
- whoami = self.whoami(tok)
- self.assertEqual(whoami, {"user_id": user_id, "device_id": device_id})
+ whoami = self._whoami(tok)
+ self.assertEqual(
+ whoami,
+ {
+ "user_id": user_id,
+ "device_id": device_id,
+ # Unstable until MSC3069 enters spec
+ "org.matrix.msc3069.is_guest": False,
+ },
+ )
+
+ def test_GET_whoami_guests(self):
+ channel = self.make_request(
+ b"POST", b"/_matrix/client/r0/register?kind=guest", b"{}"
+ )
+ tok = channel.json_body["access_token"]
+ user_id = channel.json_body["user_id"]
+ device_id = channel.json_body["device_id"]
+
+ whoami = self._whoami(tok)
+ self.assertEqual(
+ whoami,
+ {
+ "user_id": user_id,
+ "device_id": device_id,
+ # Unstable until MSC3069 enters spec
+ "org.matrix.msc3069.is_guest": True,
+ },
+ )
def test_GET_whoami_appservices(self):
user_id = "@as:test"
@@ -491,11 +523,18 @@ class WhoamiTestCase(unittest.HomeserverTestCase):
)
self.hs.get_datastore().services_cache.append(appservice)
- whoami = self.whoami(as_token)
- self.assertEqual(whoami, {"user_id": user_id})
+ whoami = self._whoami(as_token)
+ self.assertEqual(
+ whoami,
+ {
+ "user_id": user_id,
+ # Unstable until MSC3069 enters spec
+ "org.matrix.msc3069.is_guest": False,
+ },
+ )
self.assertFalse(hasattr(whoami, "device_id"))
- def whoami(self, tok):
+ def _whoami(self, tok):
channel = self.make_request("GET", "account/whoami", {}, access_token=tok)
self.assertEqual(channel.code, 200)
return channel.json_body
--
cgit 1.5.1
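As the tests above show, the response from `GET /_matrix/client/r0/account/whoami` now carries the unstable guest flag for every caller, guests included. A client-side sketch (not part of the patch; the homeserver URL and token are placeholders):

import requests

HOMESERVER = "https://example.org"   # placeholder homeserver
TOKEN = "<access token>"             # placeholder guest or full-user token

resp = requests.get(
    f"{HOMESERVER}/_matrix/client/r0/account/whoami",
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()
body = resp.json()
# e.g. {"user_id": "@kermit:example.org",
#       "device_id": "wouldgohere",
#       "org.matrix.msc3069.is_guest": False}
is_guest = bool(body.get("org.matrix.msc3069.is_guest", False))

Since the field is unstable until MSC3069 enters the spec, clients should treat its absence as `False`, as the `.get(...)` default above does.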
From 94b620a5edd6b5bc55c8aad6e00a11cc6bf210fa Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 29 Sep 2021 06:44:15 -0400
Subject: Use direct references for configuration variables (part 6). (#10916)
---
changelog.d/10916.misc | 1 +
synapse/app/_base.py | 8 ++++----
synapse/app/admin_cmd.py | 4 ++--
synapse/app/generic_worker.py | 2 +-
synapse/app/homeserver.py | 14 +++++++-------
synapse/app/phone_stats_home.py | 8 ++++----
synapse/config/_base.py | 2 +-
synapse/config/server.py | 4 +---
synapse/events/presence_router.py | 6 +++---
synapse/events/utils.py | 2 +-
synapse/federation/transport/server/__init__.py | 2 +-
synapse/handlers/directory.py | 2 +-
synapse/handlers/federation.py | 2 +-
synapse/handlers/identity.py | 2 +-
synapse/handlers/message.py | 14 ++++++++------
synapse/handlers/pagination.py | 14 ++++++++++----
synapse/handlers/profile.py | 2 +-
synapse/handlers/register.py | 2 +-
synapse/handlers/room.py | 2 +-
synapse/handlers/room_member.py | 14 +++++++-------
synapse/handlers/search.py | 2 +-
synapse/handlers/user_directory.py | 2 +-
synapse/http/matrixfederationclient.py | 10 +++++-----
synapse/replication/tcp/resource.py | 2 +-
synapse/rest/client/account.py | 10 +++++-----
synapse/rest/client/capabilities.py | 4 ++--
synapse/rest/client/filter.py | 2 +-
synapse/rest/client/profile.py | 6 +++---
synapse/rest/client/register.py | 6 +++---
synapse/rest/client/room.py | 2 +-
synapse/rest/client/shared_rooms.py | 2 +-
synapse/rest/client/sync.py | 2 +-
synapse/server_notices/resource_limits_server_notices.py | 8 ++++----
synapse/storage/databases/main/censor_events.py | 8 +++++---
synapse/storage/databases/main/client_ips.py | 2 +-
synapse/storage/databases/main/events.py | 2 +-
synapse/storage/databases/main/monthly_active_users.py | 12 ++++++------
synapse/storage/databases/main/registration.py | 2 +-
synapse/storage/databases/main/room.py | 8 ++++----
synapse/storage/databases/main/search.py | 4 ++--
synapse/storage/prepare_database.py | 2 +-
tests/api/test_auth.py | 14 +++++++-------
tests/federation/test_federation_server.py | 2 +-
tests/handlers/test_register.py | 14 +++++++-------
tests/http/test_fedclient.py | 2 +-
tests/rest/admin/test_user.py | 6 +++---
tests/rest/client/test_account.py | 2 +-
tests/rest/client/test_capabilities.py | 2 +-
tests/rest/client/test_presence.py | 2 +-
tests/rest/client/test_register.py | 4 ++--
.../server_notices/test_resource_limits_server_notices.py | 2 +-
tests/storage/test_monthly_active_users.py | 14 +++++++-------
tests/test_mau.py | 2 +-
tests/unittest.py | 2 +-
54 files changed, 141 insertions(+), 132 deletions(-)
create mode 100644 changelog.d/10916.misc
diff --git a/changelog.d/10916.misc b/changelog.d/10916.misc
new file mode 100644
index 0000000000..586a0b3a96
--- /dev/null
+++ b/changelog.d/10916.misc
@@ -0,0 +1 @@
+Use direct references to config flags.
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 548f6dcde9..749bc1deb9 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -86,11 +86,11 @@ def start_worker_reactor(appname, config, run_command=reactor.run):
start_reactor(
appname,
- soft_file_limit=config.soft_file_limit,
- gc_thresholds=config.gc_thresholds,
+ soft_file_limit=config.server.soft_file_limit,
+ gc_thresholds=config.server.gc_thresholds,
pid_file=config.worker.worker_pid_file,
daemonize=config.worker.worker_daemonize,
- print_pidfile=config.print_pidfile,
+ print_pidfile=config.server.print_pidfile,
logger=logger,
run_command=run_command,
)
@@ -298,7 +298,7 @@ def refresh_certificate(hs):
Refresh the TLS certificates that Synapse is using by re-reading them from
disk and updating the TLS context factories to use them.
"""
- if not hs.config.has_tls_listener():
+ if not hs.config.server.has_tls_listener():
return
hs.config.read_certificate_from_disk()
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index f2c5b75247..556bcc124e 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -195,14 +195,14 @@ def start(config_options):
config.logging.no_redirect_stdio = True
# Explicitly disable background processes
- config.update_user_directory = False
+ config.server.update_user_directory = False
config.worker.run_background_tasks = False
config.start_pushers = False
config.pusher_shard_config.instances = []
config.send_federation = False
config.federation_shard_config.instances = []
- synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
+ synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
ss = AdminCmdServer(
config.server.server_name,
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 3036e1b4a0..7489f31d9a 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -462,7 +462,7 @@ def start(config_options):
# For other worker types we force this to off.
config.server.update_user_directory = False
- synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
+ synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
if config.server.gc_seconds:
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 205831dcda..2b2d4bbf83 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -248,7 +248,7 @@ class SynapseHomeServer(HomeServer):
resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
if name == "webclient":
- webclient_loc = self.config.web_client_location
+ webclient_loc = self.config.server.web_client_location
if webclient_loc is None:
logger.warning(
@@ -343,7 +343,7 @@ def setup(config_options):
# generating config files and shouldn't try to continue.
sys.exit(0)
- events.USE_FROZEN_DICTS = config.use_frozen_dicts
+ events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
if config.server.gc_seconds:
@@ -439,11 +439,11 @@ def run(hs):
_base.start_reactor(
"synapse-homeserver",
- soft_file_limit=hs.config.soft_file_limit,
- gc_thresholds=hs.config.gc_thresholds,
- pid_file=hs.config.pid_file,
- daemonize=hs.config.daemonize,
- print_pidfile=hs.config.print_pidfile,
+ soft_file_limit=hs.config.server.soft_file_limit,
+ gc_thresholds=hs.config.server.gc_thresholds,
+ pid_file=hs.config.server.pid_file,
+ daemonize=hs.config.server.daemonize,
+ print_pidfile=hs.config.server.print_pidfile,
logger=logger,
)
diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py
index 49e7a45e5c..fcd01e833c 100644
--- a/synapse/app/phone_stats_home.py
+++ b/synapse/app/phone_stats_home.py
@@ -74,7 +74,7 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
store = hs.get_datastore()
stats["homeserver"] = hs.config.server.server_name
- stats["server_context"] = hs.config.server_context
+ stats["server_context"] = hs.config.server.server_context
stats["timestamp"] = now
stats["uptime_seconds"] = uptime
version = sys.version_info
@@ -171,7 +171,7 @@ def start_phone_stats_home(hs):
current_mau_count_by_service = {}
reserved_users = ()
store = hs.get_datastore()
- if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
+ if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only:
current_mau_count = await store.get_monthly_active_count()
current_mau_count_by_service = (
await store.get_monthly_active_count_by_service()
@@ -183,9 +183,9 @@ def start_phone_stats_home(hs):
current_mau_by_service_gauge.labels(app_service).set(float(count))
registered_reserved_users_mau_gauge.set(float(len(reserved_users)))
- max_mau_gauge.set(float(hs.config.max_mau_value))
+ max_mau_gauge.set(float(hs.config.server.max_mau_value))
- if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
+ if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only:
generate_monthly_active_users()
clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
# End of monthly active user settings
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index d974a1a2a8..26152b0924 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -327,7 +327,7 @@ class RootConfig:
"""
Redirect lookups on this object either to config objects, or values on
config objects, so that `config.tls.blah` works, as well as legacy uses
- of things like `config.server_name`. It will first look up the config
+ of things like `config.server.server_name`. It will first look up the config
section name, and then values on those config classes.
"""
if item in self._configs.keys():
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 041412d7ad..818b806357 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -1,6 +1,4 @@
-# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2017-2018 New Vector Ltd
-# Copyright 2019 The Matrix.org Foundation C.I.C.
+# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py
index eb4556cdc1..68b8b19024 100644
--- a/synapse/events/presence_router.py
+++ b/synapse/events/presence_router.py
@@ -45,11 +45,11 @@ def load_legacy_presence_router(hs: "HomeServer"):
configuration, and registers the hooks they implement.
"""
- if hs.config.presence_router_module_class is None:
+ if hs.config.server.presence_router_module_class is None:
return
- module = hs.config.presence_router_module_class
- config = hs.config.presence_router_config
+ module = hs.config.server.presence_router_module_class
+ config = hs.config.server.presence_router_config
api = hs.get_module_api()
presence_router = module(config=config, module_api=api)
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index f86113a448..a13fb0148f 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -372,7 +372,7 @@ class EventClientSerializer:
def __init__(self, hs):
self.store = hs.get_datastore()
self.experimental_msc1849_support_enabled = (
- hs.config.experimental_msc1849_support_enabled
+ hs.config.server.experimental_msc1849_support_enabled
)
async def serialize_event(
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
index 95176ba6f9..c32539bf5a 100644
--- a/synapse/federation/transport/server/__init__.py
+++ b/synapse/federation/transport/server/__init__.py
@@ -117,7 +117,7 @@ class PublicRoomList(BaseFederationServlet):
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_room_list_handler()
- self.allow_access = hs.config.allow_public_rooms_over_federation
+ self.allow_access = hs.config.server.allow_public_rooms_over_federation
async def on_GET(
self, origin: str, content: Literal[None], query: Dict[bytes, List[bytes]]
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 5cfba3c817..9078781d5a 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -49,7 +49,7 @@ class DirectoryHandler(BaseHandler):
self.store = hs.get_datastore()
self.config = hs.config
self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
- self.require_membership = hs.config.require_membership_for_aliases
+ self.require_membership = hs.config.server.require_membership_for_aliases
self.third_party_event_rules = hs.get_third_party_event_rules()
self.federation = hs.get_federation_client()
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 16c435ee86..3b0b895b07 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -762,7 +762,7 @@ class FederationHandler(BaseHandler):
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")
- if self.hs.config.block_non_admin_invites:
+ if self.hs.config.server.block_non_admin_invites:
raise SynapseError(403, "This server does not accept room invites")
if not await self.spam_checker.user_may_invite(
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index fe8a995892..a0640fcac0 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -57,7 +57,7 @@ class IdentityHandler(BaseHandler):
self.http_client = SimpleHttpClient(hs)
# An HTTP client for contacting identity servers specified by clients.
self.blacklisting_http_client = SimpleHttpClient(
- hs, ip_blacklist=hs.config.federation_ip_range_blacklist
+ hs, ip_blacklist=hs.config.server.federation_ip_range_blacklist
)
self.federation_http_client = hs.get_federation_http_client()
self.hs = hs
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 39c18ecf99..3b8cc50ec0 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -81,7 +81,7 @@ class MessageHandler:
self.storage = hs.get_storage()
self.state_store = self.storage.state
self._event_serializer = hs.get_event_client_serializer()
- self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages
+ self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages
# The scheduled call to self._expire_event. None if no call is currently
# scheduled.
@@ -415,7 +415,9 @@ class EventCreationHandler:
self.server_name = hs.hostname
self.notifier = hs.get_notifier()
self.config = hs.config
- self.require_membership_for_aliases = hs.config.require_membership_for_aliases
+ self.require_membership_for_aliases = (
+ hs.config.server.require_membership_for_aliases
+ )
self._events_shard_config = self.config.worker.events_shard_config
self._instance_name = hs.get_instance_name()
@@ -425,7 +427,7 @@ class EventCreationHandler:
Membership.JOIN,
Membership.KNOCK,
}
- if self.hs.config.include_profile_data_on_invite:
+ if self.hs.config.server.include_profile_data_on_invite:
self.membership_types_to_include_profile_data_in.add(Membership.INVITE)
self.send_event = ReplicationSendEventRestServlet.make_client(hs)
@@ -461,11 +463,11 @@ class EventCreationHandler:
#
self._rooms_to_exclude_from_dummy_event_insertion: Dict[str, int] = {}
# The number of forward extremeities before a dummy event is sent.
- self._dummy_events_threshold = hs.config.dummy_events_threshold
+ self._dummy_events_threshold = hs.config.server.dummy_events_threshold
if (
self.config.worker.run_background_tasks
- and self.config.cleanup_extremities_with_dummy_events
+ and self.config.server.cleanup_extremities_with_dummy_events
):
self.clock.looping_call(
lambda: run_as_background_process(
@@ -477,7 +479,7 @@ class EventCreationHandler:
self._message_handler = hs.get_message_handler()
- self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages
+ self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages
self._external_cache = hs.get_external_cache()
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index a5301ece6f..176e4dfdd4 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -85,12 +85,18 @@ class PaginationHandler:
self._purges_by_id: Dict[str, PurgeStatus] = {}
self._event_serializer = hs.get_event_client_serializer()
- self._retention_default_max_lifetime = hs.config.retention_default_max_lifetime
+ self._retention_default_max_lifetime = (
+ hs.config.server.retention_default_max_lifetime
+ )
- self._retention_allowed_lifetime_min = hs.config.retention_allowed_lifetime_min
- self._retention_allowed_lifetime_max = hs.config.retention_allowed_lifetime_max
+ self._retention_allowed_lifetime_min = (
+ hs.config.server.retention_allowed_lifetime_min
+ )
+ self._retention_allowed_lifetime_max = (
+ hs.config.server.retention_allowed_lifetime_max
+ )
- if hs.config.worker.run_background_tasks and hs.config.retention_enabled:
+ if hs.config.worker.run_background_tasks and hs.config.server.retention_enabled:
# Run the purge jobs described in the configuration file.
for job in hs.config.server.retention_purge_jobs:
logger.info("Setting up purge job with config: %s", job)
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index b23a1541bc..425c0d4973 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -397,7 +397,7 @@ class ProfileHandler(BaseHandler):
# when building a membership event. In this case, we must allow the
# lookup.
if (
- not self.hs.config.limit_profile_requests_to_users_who_share_rooms
+ not self.hs.config.server.limit_profile_requests_to_users_who_share_rooms
or not requester
):
return
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 4f99f137a2..4a7ccb882e 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -854,7 +854,7 @@ class RegistrationHandler(BaseHandler):
# Necessary due to auth checks prior to the threepid being
# written to the db
if is_threepid_reserved(
- self.hs.config.mau_limits_reserved_threepids, threepid
+ self.hs.config.server.mau_limits_reserved_threepids, threepid
):
await self.store.upsert_monthly_active_user(user_id)
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index dc4fab2223..bf8a85f563 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -666,7 +666,7 @@ class RoomCreationHandler(BaseHandler):
await self.ratelimit(requester)
room_version_id = config.get(
- "room_version", self.config.default_room_version.identifier
+ "room_version", self.config.server.default_room_version.identifier
)
if not isinstance(room_version_id, str):
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 1a56c82fbd..02103f6c9a 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -90,7 +90,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.third_party_event_rules = hs.get_third_party_event_rules()
self._server_notices_mxid = self.config.servernotices.server_notices_mxid
self._enable_lookup = hs.config.enable_3pid_lookup
- self.allow_per_room_profiles = self.config.allow_per_room_profiles
+ self.allow_per_room_profiles = self.config.server.allow_per_room_profiles
self._join_rate_limiter_local = Ratelimiter(
store=self.store,
@@ -617,7 +617,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
is_requester_admin = await self.auth.is_server_admin(requester.user)
if not is_requester_admin:
- if self.config.block_non_admin_invites:
+ if self.config.server.block_non_admin_invites:
logger.info(
"Blocking invite: user is not admin and non-admin "
"invites disabled"
@@ -1222,7 +1222,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
Raises:
ShadowBanError if the requester has been shadow-banned.
"""
- if self.config.block_non_admin_invites:
+ if self.config.server.block_non_admin_invites:
is_requester_admin = await self.auth.is_server_admin(requester.user)
if not is_requester_admin:
raise SynapseError(
@@ -1420,7 +1420,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
Returns: bool of whether the complexity is too great, or None
if unable to be fetched
"""
- max_complexity = self.hs.config.limit_remote_rooms.complexity
+ max_complexity = self.hs.config.server.limit_remote_rooms.complexity
complexity = await self.federation_handler.get_room_complexity(
remote_room_hosts, room_id
)
@@ -1436,7 +1436,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
Args:
room_id: The room ID to check for complexity.
"""
- max_complexity = self.hs.config.limit_remote_rooms.complexity
+ max_complexity = self.hs.config.server.limit_remote_rooms.complexity
complexity = await self.store.get_room_complexity(room_id)
return complexity["v1"] > max_complexity
@@ -1472,7 +1472,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
if too_complex is True:
raise SynapseError(
code=400,
- msg=self.hs.config.limit_remote_rooms.complexity_error,
+ msg=self.hs.config.server.limit_remote_rooms.complexity_error,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
@@ -1507,7 +1507,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
)
raise SynapseError(
code=400,
- msg=self.hs.config.limit_remote_rooms.complexity_error,
+ msg=self.hs.config.server.limit_remote_rooms.complexity_error,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 8226d6f5a1..6d3333ee00 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -105,7 +105,7 @@ class SearchHandler(BaseHandler):
dict to be returned to the client with results of search
"""
- if not self.hs.config.enable_search:
+ if not self.hs.config.server.enable_search:
raise SynapseError(400, "Search is disabled on this homeserver")
batch_group = None
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index b91e7cb501..f4430ce3c9 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -60,7 +60,7 @@ class UserDirectoryHandler(StateDeltasHandler):
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
self.is_mine_id = hs.is_mine_id
- self.update_user_directory = hs.config.update_user_directory
+ self.update_user_directory = hs.config.server.update_user_directory
self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
self.spam_checker = hs.get_spam_checker()
# The current position in the current_state_delta stream
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index cdc36b8d25..4f59224686 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -327,23 +327,23 @@ class MatrixFederationHttpClient:
self.reactor = hs.get_reactor()
user_agent = hs.version_string
- if hs.config.user_agent_suffix:
- user_agent = "%s %s" % (user_agent, hs.config.user_agent_suffix)
+ if hs.config.server.user_agent_suffix:
+ user_agent = "%s %s" % (user_agent, hs.config.server.user_agent_suffix)
user_agent = user_agent.encode("ascii")
federation_agent = MatrixFederationAgent(
self.reactor,
tls_client_options_factory,
user_agent,
- hs.config.federation_ip_range_whitelist,
- hs.config.federation_ip_range_blacklist,
+ hs.config.server.federation_ip_range_whitelist,
+ hs.config.server.federation_ip_range_blacklist,
)
# Use a BlacklistingAgentWrapper to prevent circumventing the IP
# blacklist via IP literals in server names
self.agent = BlacklistingAgentWrapper(
federation_agent,
- ip_blacklist=hs.config.federation_ip_range_blacklist,
+ ip_blacklist=hs.config.server.federation_ip_range_blacklist,
)
self.clock = hs.get_clock()
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index 030852cb5b..80f9b23bfd 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -71,7 +71,7 @@ class ReplicationStreamer:
self.notifier = hs.get_notifier()
self._instance_name = hs.get_instance_name()
- self._replication_torture_level = hs.config.replication_torture_level
+ self._replication_torture_level = hs.config.server.replication_torture_level
self.notifier.add_replication_callback(self.on_notifier_poke)
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index bacb828330..fff133ef10 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -119,7 +119,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
)
if existing_user_id is None:
- if self.config.request_token_inhibit_3pid_errors:
+ if self.config.server.request_token_inhibit_3pid_errors:
# Make the client think the operation succeeded. See the rationale in the
# comments for request_token_inhibit_3pid_errors.
# Also wait for some random amount of time between 100ms and 1s to make it
@@ -403,7 +403,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
existing_user_id = await self.store.get_user_id_by_threepid("email", email)
if existing_user_id is not None:
- if self.config.request_token_inhibit_3pid_errors:
+ if self.config.server.request_token_inhibit_3pid_errors:
# Make the client think the operation succeeded. See the rationale in the
# comments for request_token_inhibit_3pid_errors.
# Also wait for some random amount of time between 100ms and 1s to make it
@@ -486,7 +486,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn)
if existing_user_id is not None:
- if self.hs.config.request_token_inhibit_3pid_errors:
+ if self.hs.config.server.request_token_inhibit_3pid_errors:
# Make the client think the operation succeeded. See the rationale in the
# comments for request_token_inhibit_3pid_errors.
# Also wait for some random amount of time between 100ms and 1s to make it
@@ -857,8 +857,8 @@ def assert_valid_next_link(hs: "HomeServer", next_link: str) -> None:
# If the domain whitelist is set, the domain must be in it
if (
valid
- and hs.config.next_link_domain_whitelist is not None
- and next_link_parsed.hostname not in hs.config.next_link_domain_whitelist
+ and hs.config.server.next_link_domain_whitelist is not None
+ and next_link_parsed.hostname not in hs.config.server.next_link_domain_whitelist
):
valid = False
diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py
index 65b3b5ce2c..d6b6256413 100644
--- a/synapse/rest/client/capabilities.py
+++ b/synapse/rest/client/capabilities.py
@@ -44,10 +44,10 @@ class CapabilitiesRestServlet(RestServlet):
await self.auth.get_user_by_req(request, allow_guest=True)
change_password = self.auth_handler.can_change_password()
- response = {
+ response: JsonDict = {
"capabilities": {
"m.room_versions": {
- "default": self.config.default_room_version.identifier,
+ "default": self.config.server.default_room_version.identifier,
"available": {
v.identifier: v.disposition
for v in KNOWN_ROOM_VERSIONS.values()
diff --git a/synapse/rest/client/filter.py b/synapse/rest/client/filter.py
index 6ed60c7418..cc1c2f9731 100644
--- a/synapse/rest/client/filter.py
+++ b/synapse/rest/client/filter.py
@@ -90,7 +90,7 @@ class CreateFilterRestServlet(RestServlet):
raise AuthError(403, "Can only create filters for local users")
content = parse_json_object_from_request(request)
- set_timeline_upper_limit(content, self.hs.config.filter_timeline_limit)
+ set_timeline_upper_limit(content, self.hs.config.server.filter_timeline_limit)
filter_id = await self.filtering.add_user_filter(
user_localpart=target_user.localpart, user_filter=content
diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py
index d0f20de569..c684636c0a 100644
--- a/synapse/rest/client/profile.py
+++ b/synapse/rest/client/profile.py
@@ -41,7 +41,7 @@ class ProfileDisplaynameRestServlet(RestServlet):
) -> Tuple[int, JsonDict]:
requester_user = None
- if self.hs.config.require_auth_for_profile_requests:
+ if self.hs.config.server.require_auth_for_profile_requests:
requester = await self.auth.get_user_by_req(request)
requester_user = requester.user
@@ -94,7 +94,7 @@ class ProfileAvatarURLRestServlet(RestServlet):
) -> Tuple[int, JsonDict]:
requester_user = None
- if self.hs.config.require_auth_for_profile_requests:
+ if self.hs.config.server.require_auth_for_profile_requests:
requester = await self.auth.get_user_by_req(request)
requester_user = requester.user
@@ -146,7 +146,7 @@ class ProfileRestServlet(RestServlet):
) -> Tuple[int, JsonDict]:
requester_user = None
- if self.hs.config.require_auth_for_profile_requests:
+ if self.hs.config.server.require_auth_for_profile_requests:
requester = await self.auth.get_user_by_req(request)
requester_user = requester.user
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index 48b0062cf4..a6eb6f6410 100644
--- a/synapse/rest/client/register.py
+++ b/synapse/rest/client/register.py
@@ -129,7 +129,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
)
if existing_user_id is not None:
- if self.hs.config.request_token_inhibit_3pid_errors:
+ if self.hs.config.server.request_token_inhibit_3pid_errors:
# Make the client think the operation succeeded. See the rationale in the
# comments for request_token_inhibit_3pid_errors.
# Also wait for some random amount of time between 100ms and 1s to make it
@@ -209,7 +209,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
)
if existing_user_id is not None:
- if self.hs.config.request_token_inhibit_3pid_errors:
+ if self.hs.config.server.request_token_inhibit_3pid_errors:
# Make the client think the operation succeeded. See the rationale in the
# comments for request_token_inhibit_3pid_errors.
# Also wait for some random amount of time between 100ms and 1s to make it
@@ -682,7 +682,7 @@ class RegisterRestServlet(RestServlet):
# written to the db
if threepid:
if is_threepid_reserved(
- self.hs.config.mau_limits_reserved_threepids, threepid
+ self.hs.config.server.mau_limits_reserved_threepids, threepid
):
await self.store.upsert_monthly_active_user(registered_user_id)
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index bf46dc60f2..ed95189b6d 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -369,7 +369,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
# Option to allow servers to require auth when accessing
# /publicRooms via CS API. This is especially helpful in private
# federations.
- if not self.hs.config.allow_public_rooms_without_auth:
+ if not self.hs.config.server.allow_public_rooms_without_auth:
raise
# We allow people to not be authed if they're just looking at our
diff --git a/synapse/rest/client/shared_rooms.py b/synapse/rest/client/shared_rooms.py
index 1d90493eb0..09a46737de 100644
--- a/synapse/rest/client/shared_rooms.py
+++ b/synapse/rest/client/shared_rooms.py
@@ -42,7 +42,7 @@ class UserSharedRoomsServlet(RestServlet):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
- self.user_directory_active = hs.config.update_user_directory
+ self.user_directory_active = hs.config.server.update_user_directory
async def on_GET(
self, request: SynapseRequest, user_id: str
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 1259058b9b..913216a7c4 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -155,7 +155,7 @@ class SyncRestServlet(RestServlet):
try:
filter_object = json_decoder.decode(filter_id)
set_timeline_upper_limit(
- filter_object, self.hs.config.filter_timeline_limit
+ filter_object, self.hs.config.server.filter_timeline_limit
)
except Exception:
raise SynapseError(400, "Invalid filter JSON")
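
Both call sites above funnel client-supplied filters through `set_timeline_upper_limit` before storing or applying them, so a filter can never request more timeline events than the server-configured `filter_timeline_limit`. The helper itself is outside this patch; the following is a plausible sketch of its behaviour, assuming a negative limit means "no cap":

```python
def set_timeline_upper_limit(filter_json: dict, filter_timeline_limit: int) -> None:
    # Clamp the room timeline limit in a client filter to the server's
    # configured maximum; a negative maximum disables the cap entirely.
    if filter_timeline_limit < 0:
        return
    timeline = filter_json.get("room", {}).get("timeline", {})
    if "limit" in timeline:
        timeline["limit"] = min(timeline["limit"], filter_timeline_limit)

f = {"room": {"timeline": {"limit": 500}}}
set_timeline_upper_limit(f, 100)
assert f["room"]["timeline"]["limit"] == 100
```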
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index 073b0d754f..8522930b50 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -47,9 +47,9 @@ class ResourceLimitsServerNotices:
self._notifier = hs.get_notifier()
self._enabled = (
- hs.config.limit_usage_by_mau
+ hs.config.server.limit_usage_by_mau
and self._server_notices_manager.is_enabled()
- and not hs.config.hs_disabled
+ and not hs.config.server.hs_disabled
)
async def maybe_send_server_notice_to_user(self, user_id: str) -> None:
@@ -98,7 +98,7 @@ class ResourceLimitsServerNotices:
try:
if (
limit_type == LimitBlockingTypes.MONTHLY_ACTIVE_USER
- and not self._config.mau_limit_alerting
+ and not self._config.server.mau_limit_alerting
):
# We have hit the MAU limit, but MAU alerting is disabled:
# reset room if necessary and return
@@ -149,7 +149,7 @@ class ResourceLimitsServerNotices:
"body": event_body,
"msgtype": ServerNoticeMsgType,
"server_notice_type": ServerNoticeLimitReached,
- "admin_contact": self._config.admin_contact,
+ "admin_contact": self._config.server.admin_contact,
"limit_type": event_limit_type,
}
event = await self._server_notices_manager.send_notice(
diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py
index 6305414e3d..eee07227ef 100644
--- a/synapse/storage/databases/main/censor_events.py
+++ b/synapse/storage/databases/main/censor_events.py
@@ -36,7 +36,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
if (
hs.config.worker.run_background_tasks
- and self.hs.config.redaction_retention_period is not None
+ and self.hs.config.server.redaction_retention_period is not None
):
hs.get_clock().looping_call(self._censor_redactions, 5 * 60 * 1000)
@@ -48,7 +48,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
By censor we mean update the event_json table with the redacted event.
"""
- if self.hs.config.redaction_retention_period is None:
+ if self.hs.config.server.redaction_retention_period is None:
return
if not (
@@ -60,7 +60,9 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
# created.
return
- before_ts = self._clock.time_msec() - self.hs.config.redaction_retention_period
+ before_ts = (
+ self._clock.time_msec() - self.hs.config.server.redaction_retention_period
+ )
# We fetch all redactions that:
# 1. point to an event we have,
diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index 7e33ae578c..0e1d97aaeb 100644
--- a/synapse/storage/databases/main/client_ips.py
+++ b/synapse/storage/databases/main/client_ips.py
@@ -353,7 +353,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore):
def __init__(self, database: DatabasePool, db_conn, hs):
super().__init__(database, db_conn, hs)
- self.user_ips_max_age = hs.config.user_ips_max_age
+ self.user_ips_max_age = hs.config.server.user_ips_max_age
if hs.config.worker.run_background_tasks and self.user_ips_max_age:
self._clock.looping_call(self._prune_old_user_ips, 5 * 1000)
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index cc4e31ec30..bc7d213fe2 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -104,7 +104,7 @@ class PersistEventsStore:
self._clock = hs.get_clock()
self._instance_name = hs.get_instance_name()
- self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
+ self._ephemeral_messages_enabled = hs.config.server.enable_ephemeral_messages
self.is_mine_id = hs.is_mine_id
# Ideally we'd move these ID gens here, unfortunately some other ID
diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py
index b76ee51a9b..a14ac03d4b 100644
--- a/synapse/storage/databases/main/monthly_active_users.py
+++ b/synapse/storage/databases/main/monthly_active_users.py
@@ -32,8 +32,8 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
self._clock = hs.get_clock()
self.hs = hs
- self._limit_usage_by_mau = hs.config.limit_usage_by_mau
- self._max_mau_value = hs.config.max_mau_value
+ self._limit_usage_by_mau = hs.config.server.limit_usage_by_mau
+ self._max_mau_value = hs.config.server.max_mau_value
@cached(num_args=0)
async def get_monthly_active_count(self) -> int:
@@ -96,8 +96,8 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
"""
users = []
- for tp in self.hs.config.mau_limits_reserved_threepids[
- : self.hs.config.max_mau_value
+ for tp in self.hs.config.server.mau_limits_reserved_threepids[
+ : self.hs.config.server.max_mau_value
]:
user_id = await self.hs.get_datastore().get_user_id_by_threepid(
tp["medium"], tp["address"]
@@ -212,7 +212,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
def __init__(self, database: DatabasePool, db_conn, hs):
super().__init__(database, db_conn, hs)
- self._mau_stats_only = hs.config.mau_stats_only
+ self._mau_stats_only = hs.config.server.mau_stats_only
# Do not add more reserved users than the total allowable number
self.db_pool.new_transaction(
@@ -221,7 +221,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
[],
[],
self._initialise_reserved_users,
- hs.config.mau_limits_reserved_threepids[: self._max_mau_value],
+ hs.config.server.mau_limits_reserved_threepids[: self._max_mau_value],
)
def _initialise_reserved_users(self, txn, threepids):
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index c83089ee63..7279b0924e 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -207,7 +207,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
return False
now = self._clock.time_msec()
- trial_duration_ms = self.config.mau_trial_days * 24 * 60 * 60 * 1000
+ trial_duration_ms = self.config.server.mau_trial_days * 24 * 60 * 60 * 1000
is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms
return is_trial
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 118b390e93..d69eaf80ce 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -679,8 +679,8 @@ class RoomWorkerStore(SQLBaseStore):
# policy.
if not ret:
return {
- "min_lifetime": self.config.retention_default_min_lifetime,
- "max_lifetime": self.config.retention_default_max_lifetime,
+ "min_lifetime": self.config.server.retention_default_min_lifetime,
+ "max_lifetime": self.config.server.retention_default_max_lifetime,
}
row = ret[0]
@@ -690,10 +690,10 @@ class RoomWorkerStore(SQLBaseStore):
# The default values will be None if no default policy has been defined, or if one
# of the attributes is missing from the default policy.
if row["min_lifetime"] is None:
- row["min_lifetime"] = self.config.retention_default_min_lifetime
+ row["min_lifetime"] = self.config.server.retention_default_min_lifetime
if row["max_lifetime"] is None:
- row["max_lifetime"] = self.config.retention_default_max_lifetime
+ row["max_lifetime"] = self.config.server.retention_default_max_lifetime
return row
diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index 2a1e99e17a..c85383c975 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -51,7 +51,7 @@ class SearchWorkerStore(SQLBaseStore):
txn:
entries: entries to be added to the table
"""
- if not self.hs.config.enable_search:
+ if not self.hs.config.server.enable_search:
return
if isinstance(self.database_engine, PostgresEngine):
sql = (
@@ -105,7 +105,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
def __init__(self, database: DatabasePool, db_conn, hs):
super().__init__(database, db_conn, hs)
- if not hs.config.enable_search:
+ if not hs.config.server.enable_search:
return
self.db_pool.updates.register_background_update_handler(
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index f31880b8ec..a63eaddfdc 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -366,7 +366,7 @@ def _upgrade_existing_database(
+ "new for the server to understand"
)
- # some of the deltas assume that config.server_name is set correctly, so now
+ # some of the deltas assume that server_name is set correctly, so now
# is a good time to run the sanity check.
if not is_empty and "main" in databases:
from synapse.storage.databases.main import check_database_before_upgrade
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index cccff7af26..3aa9ba3c43 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -217,7 +217,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
user_id = "@baldrick:matrix.org"
macaroon = pymacaroons.Macaroon(
- location=self.hs.config.server_name,
+ location=self.hs.config.server.server_name,
identifier="key",
key=self.hs.config.key.macaroon_secret_key,
)
@@ -239,7 +239,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
user_id = "@baldrick:matrix.org"
macaroon = pymacaroons.Macaroon(
- location=self.hs.config.server_name,
+ location=self.hs.config.server.server_name,
identifier="key",
key=self.hs.config.key.macaroon_secret_key,
)
@@ -268,7 +268,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
self.store.get_monthly_active_count = simple_async_mock(lots_of_users)
e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError)
- self.assertEquals(e.value.admin_contact, self.hs.config.admin_contact)
+ self.assertEquals(e.value.admin_contact, self.hs.config.server.admin_contact)
self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
self.assertEquals(e.value.code, 403)
@@ -303,7 +303,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
appservice = ApplicationService(
"abcd",
- self.hs.config.server_name,
+ self.hs.config.server.server_name,
id="1234",
namespaces={
"users": [{"regex": "@_appservice.*:sender", "exclusive": True}]
@@ -332,7 +332,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
appservice = ApplicationService(
"abcd",
- self.hs.config.server_name,
+ self.hs.config.server.server_name,
id="1234",
namespaces={
"users": [{"regex": "@_appservice.*:sender", "exclusive": True}]
@@ -372,7 +372,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
self.auth_blocking._hs_disabled = True
self.auth_blocking._hs_disabled_message = "Reason for being disabled"
e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError)
- self.assertEquals(e.value.admin_contact, self.hs.config.admin_contact)
+ self.assertEquals(e.value.admin_contact, self.hs.config.server.admin_contact)
self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
self.assertEquals(e.value.code, 403)
@@ -387,7 +387,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
self.auth_blocking._hs_disabled = True
self.auth_blocking._hs_disabled_message = "Reason for being disabled"
e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError)
- self.assertEquals(e.value.admin_contact, self.hs.config.admin_contact)
+ self.assertEquals(e.value.admin_contact, self.hs.config.server.admin_contact)
self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
self.assertEquals(e.value.code, 403)
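
The macaroon-related hunks above only change where `server_name` is read from, but the object they construct is easy to gloss over in diff form. For reference, a self-contained example of minting and verifying a macaroon with `pymacaroons` (the caveats are modelled on the ones these tests poke at, but the values here are invented):

```python
import pymacaroons

secret_key = "macaroon-secret-key"  # stands in for config.key.macaroon_secret_key
macaroon = pymacaroons.Macaroon(
    location="matrix.org",  # the homeserver's server_name
    identifier="key",
    key=secret_key,
)
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("user_id = @baldrick:matrix.org")

verifier = pymacaroons.Verifier()
verifier.satisfy_exact("gen = 1")
verifier.satisfy_general(lambda caveat: caveat.startswith("user_id = "))
assert verifier.verify(macaroon, secret_key)
```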
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index 0b60cc4261..03e1e11f49 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -120,7 +120,7 @@ class StateQueryTests(unittest.FederatingHomeserverTestCase):
self.assertEqual(
channel.json_body["room_version"],
- self.hs.config.default_room_version.identifier,
+ self.hs.config.server.default_room_version.identifier,
)
members = set(
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index d3efb67e3e..bd05a2c2d1 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -175,20 +175,20 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
self.assertTrue(result_token is not None)
def test_mau_limits_when_disabled(self):
- self.hs.config.limit_usage_by_mau = False
+ self.hs.config.server.limit_usage_by_mau = False
# Ensure does not throw exception
self.get_success(self.get_or_create_user(self.requester, "a", "display_name"))
def test_get_or_create_user_mau_not_blocked(self):
- self.hs.config.limit_usage_by_mau = True
+ self.hs.config.server.limit_usage_by_mau = True
self.store.count_monthly_users = Mock(
- return_value=make_awaitable(self.hs.config.max_mau_value - 1)
+ return_value=make_awaitable(self.hs.config.server.max_mau_value - 1)
)
# Ensure does not throw exception
self.get_success(self.get_or_create_user(self.requester, "c", "User"))
def test_get_or_create_user_mau_blocked(self):
- self.hs.config.limit_usage_by_mau = True
+ self.hs.config.server.limit_usage_by_mau = True
self.store.get_monthly_active_count = Mock(
return_value=make_awaitable(self.lots_of_users)
)
@@ -198,7 +198,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
)
self.store.get_monthly_active_count = Mock(
- return_value=make_awaitable(self.hs.config.max_mau_value)
+ return_value=make_awaitable(self.hs.config.server.max_mau_value)
)
self.get_failure(
self.get_or_create_user(self.requester, "b", "display_name"),
@@ -206,7 +206,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
)
def test_register_mau_blocked(self):
- self.hs.config.limit_usage_by_mau = True
+ self.hs.config.server.limit_usage_by_mau = True
self.store.get_monthly_active_count = Mock(
return_value=make_awaitable(self.lots_of_users)
)
@@ -215,7 +215,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
)
self.store.get_monthly_active_count = Mock(
- return_value=make_awaitable(self.hs.config.max_mau_value)
+ return_value=make_awaitable(self.hs.config.server.max_mau_value)
)
self.get_failure(
self.handler.register_user(localpart="local_part"), ResourceLimitError
diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py
index d9a8b077d3..638babae69 100644
--- a/tests/http/test_fedclient.py
+++ b/tests/http/test_fedclient.py
@@ -226,7 +226,7 @@ class FederationClientTests(HomeserverTestCase):
"""Ensure that Synapse does not try to connect to blacklisted IPs"""
# Set up the ip_range blacklist
- self.hs.config.federation_ip_range_blacklist = IPSet(
+ self.hs.config.server.federation_ip_range_blacklist = IPSet(
["127.0.0.0/8", "fe80::/64"]
)
self.reactor.lookups["internal"] = "127.0.0.1"
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index ee3ae9cce4..a285d5a7fe 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -422,7 +422,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
# Set monthly active users to the limit
store.get_monthly_active_count = Mock(
- return_value=make_awaitable(self.hs.config.max_mau_value)
+ return_value=make_awaitable(self.hs.config.server.max_mau_value)
)
# Check that the blocking of monthly active users is working as expected
# The registration of a new user fails due to the limit
@@ -1485,7 +1485,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
# Set monthly active users to the limit
self.store.get_monthly_active_count = Mock(
- return_value=make_awaitable(self.hs.config.max_mau_value)
+ return_value=make_awaitable(self.hs.config.server.max_mau_value)
)
# Check that the blocking of monthly active users is working as expected
# The registration of a new user fails due to the limit
@@ -1522,7 +1522,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
# Set monthly active users to the limit
self.store.get_monthly_active_count = Mock(
- return_value=make_awaitable(self.hs.config.max_mau_value)
+ return_value=make_awaitable(self.hs.config.server.max_mau_value)
)
# Check that the blocking of monthly active users is working as expected
# The registration of a new user fails due to the limit
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index 64b0b8458b..2f44547bfb 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -516,7 +516,7 @@ class WhoamiTestCase(unittest.HomeserverTestCase):
appservice = ApplicationService(
as_token,
- self.hs.config.server_name,
+ self.hs.config.server.server_name,
id="1234",
namespaces={"users": [{"regex": user_id, "exclusive": True}]},
sender=user_id,
diff --git a/tests/rest/client/test_capabilities.py b/tests/rest/client/test_capabilities.py
index 422361b62a..b9e3602552 100644
--- a/tests/rest/client/test_capabilities.py
+++ b/tests/rest/client/test_capabilities.py
@@ -55,7 +55,7 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase):
self.assertTrue(room_version in KNOWN_ROOM_VERSIONS, "" + room_version)
self.assertEqual(
- self.config.default_room_version.identifier,
+ self.config.server.default_room_version.identifier,
capabilities["m.room_versions"]["default"],
)
diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py
index 1d152352d1..56fe1a3d01 100644
--- a/tests/rest/client/test_presence.py
+++ b/tests/rest/client/test_presence.py
@@ -50,7 +50,7 @@ class PresenceTestCase(unittest.HomeserverTestCase):
PUT to the status endpoint with use_presence enabled will call
set_state on the presence handler.
"""
- self.hs.config.use_presence = True
+ self.hs.config.server.use_presence = True
body = {"presence": "here", "status_msg": "beep boop"}
channel = self.make_request(
diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py
index 72a5a11b46..af135d57e1 100644
--- a/tests/rest/client/test_register.py
+++ b/tests/rest/client/test_register.py
@@ -50,7 +50,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
appservice = ApplicationService(
as_token,
- self.hs.config.server_name,
+ self.hs.config.server.server_name,
id="1234",
namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]},
sender="@as:test",
@@ -74,7 +74,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
appservice = ApplicationService(
as_token,
- self.hs.config.server_name,
+ self.hs.config.server.server_name,
id="1234",
namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]},
sender="@as:test",
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index 7f25200a5d..36c495954f 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -346,7 +346,7 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase):
invites = []
# Register as many users as the MAU limit allows.
- for i in range(self.hs.config.max_mau_value):
+ for i in range(self.hs.config.server.max_mau_value):
localpart = "user%d" % i
user_id = self.register_user(localpart, "password")
tok = self.login(localpart, "password")
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 944dbc34a2..d6b4cdd788 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -51,7 +51,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
@override_config({"max_mau_value": 3, "mau_limit_reserved_threepids": gen_3pids(3)})
def test_initialise_reserved_users(self):
- threepids = self.hs.config.mau_limits_reserved_threepids
+ threepids = self.hs.config.server.mau_limits_reserved_threepids
# register three users, of which two have reserved 3pids, and a third
# which is a support user.
@@ -101,9 +101,9 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
# XXX some of this is redundant. poking things into the config shouldn't
# work, and in any case it's not obvious what we expect to happen when
# we advance the reactor.
- self.hs.config.max_mau_value = 0
+ self.hs.config.server.max_mau_value = 0
self.reactor.advance(FORTY_DAYS)
- self.hs.config.max_mau_value = 5
+ self.hs.config.server.max_mau_value = 5
self.get_success(self.store.reap_monthly_active_users())
@@ -183,7 +183,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
self.get_success(d)
count = self.get_success(self.store.get_monthly_active_count())
- self.assertEqual(count, self.hs.config.max_mau_value)
+ self.assertEqual(count, self.hs.config.server.max_mau_value)
self.reactor.advance(FORTY_DAYS)
@@ -199,7 +199,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
def test_reap_monthly_active_users_reserved_users(self):
"""Tests that reaping correctly handles reaping where reserved users are
present"""
- threepids = self.hs.config.mau_limits_reserved_threepids
+ threepids = self.hs.config.server.mau_limits_reserved_threepids
initial_users = len(threepids)
reserved_user_number = initial_users - 1
for i in range(initial_users):
@@ -234,7 +234,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
self.get_success(d)
count = self.get_success(self.store.get_monthly_active_count())
- self.assertEqual(count, self.hs.config.max_mau_value)
+ self.assertEqual(count, self.hs.config.server.max_mau_value)
def test_populate_monthly_users_is_guest(self):
# Test that guest users are not added to mau list
@@ -294,7 +294,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
{"medium": "email", "address": user2_email},
]
- self.hs.config.mau_limits_reserved_threepids = threepids
+ self.hs.config.server.mau_limits_reserved_threepids = threepids
d = self.store.db_pool.runInteraction(
"initialise", self.store._initialise_reserved_users, threepids
)
diff --git a/tests/test_mau.py b/tests/test_mau.py
index 66111eb367..80ab40e255 100644
--- a/tests/test_mau.py
+++ b/tests/test_mau.py
@@ -165,7 +165,7 @@ class TestMauLimit(unittest.HomeserverTestCase):
@override_config({"mau_trial_days": 1})
def test_trial_users_cant_come_back(self):
- self.hs.config.mau_trial_days = 1
+ self.hs.config.server.mau_trial_days = 1
# We should be able to register more than the limit initially
token1 = self.create_user("kermit1")
diff --git a/tests/unittest.py b/tests/unittest.py
index 7a6f5954d0..6d5d87cb78 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -232,7 +232,7 @@ class HomeserverTestCase(TestCase):
# Honour the `use_frozen_dicts` config option. We have to do this
# manually because this is taken care of in the app `start` code, which
# we don't run. Plus we want to reset it on tearDown.
- events.USE_FROZEN_DICTS = self.hs.config.use_frozen_dicts
+ events.USE_FROZEN_DICTS = self.hs.config.server.use_frozen_dicts
if self.hs is None:
raise Exception("No homeserver returned from make_homeserver.")
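
Taken together, every hunk in this patch is the same mechanical move: settings that previously hung directly off the root config object now live on its `server` sub-config. A toy sketch of the before/after shape (the dataclasses are illustrative, not Synapse's config classes):

```python
from dataclasses import dataclass, field

@dataclass
class ServerConfig:
    server_name: str = "example.com"
    limit_usage_by_mau: bool = False
    max_mau_value: int = 50

@dataclass
class RootConfig:
    server: ServerConfig = field(default_factory=ServerConfig)

config = RootConfig()
# Before this patch: config.max_mau_value (flat on the root object).
# After this patch:  config.server.max_mau_value (namespaced by config file).
assert config.server.max_mau_value == 50
```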
From e32b9f44ee466ad8dad47fdbea7e2711c11b9dc7 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 29 Sep 2021 11:57:53 +0100
Subject: Update installation instructions (#10919)
Various updates to the install docs.
---
README.rst | 2 +-
changelog.d/10919.doc | 1 +
docs/setup/installation.md | 328 +++++++++++++++++++++++----------------------
3 files changed, 167 insertions(+), 164 deletions(-)
create mode 100644 changelog.d/10919.doc
diff --git a/README.rst b/README.rst
index db977c025f..524a3a5142 100644
--- a/README.rst
+++ b/README.rst
@@ -288,7 +288,7 @@ Quick start
Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
-`Installing from source `_.
+`Platform-specific prerequisites `_.
To check out a synapse for development, clone the git repo into a working
directory of your choice::
diff --git a/changelog.d/10919.doc b/changelog.d/10919.doc
new file mode 100644
index 0000000000..d0bddc3f1b
--- /dev/null
+++ b/changelog.d/10919.doc
@@ -0,0 +1 @@
+Minor updates to the installation instructions.
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index 06f869cd75..874925e927 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -18,19 +18,179 @@ that your email address is probably `user@example.com` rather than
## Installing Synapse
-### Installing from source
+### Prebuilt packages
+
+Prebuilt packages are available for a number of platforms. These are recommended
+for most users.
+
+#### Docker images and Ansible playbooks
+
+There is an official synapse image available at
+ which can be used with
+the docker-compose file available at
+[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker).
+Further information on this including configuration options is available in the README
+on hub.docker.com.
+
+Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
+Dockerfile to automate a synapse server in a single Docker image, at
+
+
+Slavi Pantaleev has created an Ansible playbook,
+which installs the official Docker image of Matrix Synapse
+along with many other Matrix-related services (Postgres database, Element, coturn,
+ma1sd, SSL support, etc.).
+For more details, see
+
+
+#### Debian/Ubuntu
+
+##### Matrix.org packages
+
+Matrix.org provides Debian/Ubuntu packages of Synapse, for the amd64
+architecture via .
+
+To install the latest release:
+
+```sh
+sudo apt install -y lsb-release wget apt-transport-https
+sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
+echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
+ sudo tee /etc/apt/sources.list.d/matrix-org.list
+sudo apt update
+sudo apt install matrix-synapse-py3
+```
+
+Packages are also published for release candidates. To enable the prerelease
+channel, add `prerelease` to the `sources.list` line. For example:
+
+```sh
+sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
+echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main prerelease" |
+ sudo tee /etc/apt/sources.list.d/matrix-org.list
+sudo apt update
+sudo apt install matrix-synapse-py3
+```
+
+The fingerprint of the repository signing key (as shown by `gpg
+/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
+`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.
+
+##### Downstream Debian packages
+
+We do not recommend using the packages from the default Debian `buster`
+repository at this time, as they are old and suffer from known security
+vulnerabilities. You can install the latest version of Synapse from
+[our repository](#matrixorg-packages) or from `buster-backports`. Please
+see the [Debian documentation](https://backports.debian.org/Instructions/)
+for information on how to use backports.
+
+If you are using Debian `sid` or testing, Synapse is available in the default
+repositories and it should be possible to install it simply with:
+
+```sh
+sudo apt install matrix-synapse
+```
+
+##### Downstream Ubuntu packages
+
+We do not recommend using the packages in the default Ubuntu repository
+at this time, as they are old and suffer from known security vulnerabilities.
+The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
+
+#### Fedora
+
+Synapse is in the Fedora repositories as `matrix-synapse`:
+
+```sh
+sudo dnf install matrix-synapse
+```
+
+Oleg Girko provides Fedora RPMs at
+
+
+#### OpenSUSE
+
+Synapse is in the OpenSUSE repositories as `matrix-synapse`:
+
+```sh
+sudo zypper install matrix-synapse
+```
+
+#### SUSE Linux Enterprise Server
+
+Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
+
+
+#### ArchLinux
+
+The quickest way to get up and running with ArchLinux is probably with the community package
+, which should pull in most of
+the necessary dependencies.
+
+pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):
+
+```sh
+sudo pip install --upgrade pip
+```
+
+If you encounter an error with lib bcrypt causing a Wrong ELF Class:
+ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
+compile it under the right architecture. (This should not be needed if
+installing under virtualenv):
+
+```sh
+sudo pip uninstall py-bcrypt
+sudo pip install py-bcrypt
+```
+
+#### Void Linux
+
+Synapse can be found in the void repositories as 'synapse':
+
+```sh
+xbps-install -Su
+xbps-install -S synapse
+```
+
+#### FreeBSD
+
+Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
+
+- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
+- Packages: `pkg install py37-matrix-synapse`
+
+#### OpenBSD
+
+As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
+underlying the homeserver directory (defaults to `/var/synapse`) has to be
+mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
+and mounting it to `/var/synapse` should be taken into consideration.
+
+Installing Synapse:
+
+```sh
+doas pkg_add synapse
+```
+
+#### NixOS
+
+Robin Lambertz has packaged Synapse for NixOS at:
+
+
+
+### Installing as a Python module from PyPI
-(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)
+It's also possible to install Synapse as a Python module from PyPI.
-When installing from source please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed.
+When following this route please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed.
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
-- Python 3.5.2 or later, up to Python 3.9.
+- Python 3.6 or later, up to Python 3.9.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
-
To install the Synapse homeserver run:
```sh
@@ -203,164 +363,6 @@ be found at for
Windows 10 and
for Windows Server.
-### Prebuilt packages
-
-As an alternative to installing from source, prebuilt packages are available
-for a number of platforms.
-
-#### Docker images and Ansible playbooks
-
-There is an official synapse image available at
- which can be used with
-the docker-compose file available at
-[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker).
-Further information on this including configuration options is available in the README
-on hub.docker.com.
-
-Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
-Dockerfile to automate a synapse server in a single Docker image, at
-
-
-Slavi Pantaleev has created an Ansible playbook,
-which installs the official Docker image of Matrix Synapse
-along with many other Matrix-related services (Postgres database, Element, coturn,
-ma1sd, SSL support, etc.).
-For more details, see
-
-
-#### Debian/Ubuntu
-
-##### Matrix.org packages
-
-Matrix.org provides Debian/Ubuntu packages of Synapse via
-. To install the latest release:
-
-```sh
-sudo apt install -y lsb-release wget apt-transport-https
-sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
-echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
- sudo tee /etc/apt/sources.list.d/matrix-org.list
-sudo apt update
-sudo apt install matrix-synapse-py3
-```
-
-Packages are also published for release candidates. To enable the prerelease
-channel, add `prerelease` to the `sources.list` line. For example:
-
-```sh
-sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
-echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main prerelease" |
- sudo tee /etc/apt/sources.list.d/matrix-org.list
-sudo apt update
-sudo apt install matrix-synapse-py3
-```
-
-The fingerprint of the repository signing key (as shown by `gpg
-/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
-`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.
-
-##### Downstream Debian packages
-
-We do not recommend using the packages from the default Debian `buster`
-repository at this time, as they are old and suffer from known security
-vulnerabilities. You can install the latest version of Synapse from
-[our repository](#matrixorg-packages) or from `buster-backports`. Please
-see the [Debian documentation](https://backports.debian.org/Instructions/)
-for information on how to use backports.
-
-If you are using Debian `sid` or testing, Synapse is available in the default
-repositories and it should be possible to install it simply with:
-
-```sh
-sudo apt install matrix-synapse
-```
-
-##### Downstream Ubuntu packages
-
-We do not recommend using the packages in the default Ubuntu repository
-at this time, as they are old and suffer from known security vulnerabilities.
-The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
-
-#### Fedora
-
-Synapse is in the Fedora repositories as `matrix-synapse`:
-
-```sh
-sudo dnf install matrix-synapse
-```
-
-Oleg Girko provides Fedora RPMs at
-
-
-#### OpenSUSE
-
-Synapse is in the OpenSUSE repositories as `matrix-synapse`:
-
-```sh
-sudo zypper install matrix-synapse
-```
-
-#### SUSE Linux Enterprise Server
-
-Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
-
-
-#### ArchLinux
-
-The quickest way to get up and running with ArchLinux is probably with the community package
-, which should pull in most of
-the necessary dependencies.
-
-pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):
-
-```sh
-sudo pip install --upgrade pip
-```
-
-If you encounter an error with lib bcrypt causing a Wrong ELF Class:
-ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
-compile it under the right architecture. (This should not be needed if
-installing under virtualenv):
-
-```sh
-sudo pip uninstall py-bcrypt
-sudo pip install py-bcrypt
-```
-
-#### Void Linux
-
-Synapse can be found in the void repositories as 'synapse':
-
-```sh
-xbps-install -Su
-xbps-install -S synapse
-```
-
-#### FreeBSD
-
-Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
-
-- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
-- Packages: `pkg install py37-matrix-synapse`
-
-#### OpenBSD
-
-As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
-underlying the homeserver directory (defaults to `/var/synapse`) has to be
-mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
-and mounting it to `/var/synapse` should be taken into consideration.
-
-Installing Synapse:
-
-```sh
-doas pkg_add synapse
-```
-
-#### NixOS
-
-Robin Lambertz has packaged Synapse for NixOS at:
-
-
## Setting up Synapse
Once you have installed synapse as above, you will need to configure it.
From 176aa55fd5971610727cb10372faf521542653d9 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 29 Sep 2021 11:59:43 +0100
Subject: add event id to logcontext when handling incoming PDUs (#10936)
---
changelog.d/10936.misc | 1 +
synapse/federation/federation_server.py | 5 ++++-
2 files changed, 5 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/10936.misc
diff --git a/changelog.d/10936.misc b/changelog.d/10936.misc
new file mode 100644
index 0000000000..9d1d6e5b02
--- /dev/null
+++ b/changelog.d/10936.misc
@@ -0,0 +1 @@
+Include the event id in the logcontext when handling PDUs received over federation.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 638959cbec..83f11d6b88 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -1008,7 +1008,10 @@ class FederationServer(FederationBase):
async with lock:
logger.info("handling received PDU: %s", event)
try:
- await self._federation_event_handler.on_receive_pdu(origin, event)
+ with nested_logging_context(event.event_id):
+ await self._federation_event_handler.on_receive_pdu(
+ origin, event
+ )
except FederationError as e:
# XXX: Ideally we'd inform the remote we failed to process
# the event, but we can't return an error in the transaction
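
For readers unfamiliar with Synapse's log contexts: `nested_logging_context` tags every log line emitted inside the `with` block with a suffix, here the event ID, so interleaved PDU handling can be told apart in the logs. A rough stand-in built on `contextvars` (Synapse's real implementation also tracks resource usage and integrates with Twisted, which this sketch ignores):

```python
import contextvars
import logging
from contextlib import contextmanager

_log_context = contextvars.ContextVar("log_context", default="")

class _ContextFilter(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        record.ctx = _log_context.get()  # expose the context to formatters
        return True

@contextmanager
def nested_logging_context(suffix: str):
    """Push `suffix` (e.g. an event ID) onto the current context name."""
    parent = _log_context.get()
    token = _log_context.set(f"{parent}-{suffix}" if parent else suffix)
    try:
        yield
    finally:
        _log_context.reset(token)

logging.basicConfig(format="%(levelname)s [%(ctx)s] %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
logger.addFilter(_ContextFilter())

with nested_logging_context("$some_event_id"):
    logger.info("handling received PDU")  # -> INFO [$some_event_id] handling received PDU
```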
From 428174f90249ec50f977b5ef5c5cf9f92599ee0a Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 29 Sep 2021 18:59:15 +0100
Subject: Split `event_auth.check` into two parts (#10940)
Broadly, the existing `event_auth.check` function has two parts:
* a validation section: checks that the event isn't too big, that it has the right signatures, etc.
This bit is independent of the rest of the state in the room, and so need only be done once
for each event.
* an auth section: ensures that the event is allowed, given the rest of the state in the room.
This gets done multiple times, against various sets of room state, because it forms part of
the state res algorithm.
Currently, this is implemented with `do_sig_check` and `do_size_check` parameters, but I think
that makes everything hard to follow. Instead, we split the function in two and call each part
separately where it is needed.
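
In sketch form, the resulting contract looks like this (signatures taken from the diff below; the bodies are elided stubs, not the real checks):

```python
from typing import Any, Dict, Tuple

StateMap = Dict[Tuple[str, str], Any]  # (event type, state key) -> event

def validate_event_for_room_version(room_version_obj: Any, event: Any) -> None:
    """One-shot checks: size limits, required fields, expected signatures.
    Independent of the room state, so it runs once per received event."""

def check_auth_rules_for_event(
    room_version_obj: Any, event: Any, auth_events: StateMap
) -> None:
    """State-dependent auth rules; re-run against each candidate set of
    auth events during state resolution."""

# On first receipt over federation both halves run in order. Inside the
# state-resolution loop only check_auth_rules_for_event is re-evaluated,
# which is what do_sig_check=False / do_size_check=False used to express.
```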
---
changelog.d/10940.misc | 1 +
synapse/event_auth.py | 153 +++++++++++++++++++++--------------
synapse/handlers/event_auth.py | 15 ++--
synapse/handlers/federation.py | 30 ++++---
synapse/handlers/federation_event.py | 18 +++--
synapse/handlers/message.py | 6 +-
synapse/handlers/room.py | 6 +-
synapse/state/v1.py | 8 +-
synapse/state/v2.py | 4 +-
tests/test_event_auth.py | 108 +++++++++----------------
10 files changed, 177 insertions(+), 172 deletions(-)
create mode 100644 changelog.d/10940.misc
diff --git a/changelog.d/10940.misc b/changelog.d/10940.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/10940.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 5d7c6fa858..eef354de6e 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -41,42 +41,112 @@ from synapse.types import StateMap, UserID, get_domain_from_id
logger = logging.getLogger(__name__)
-def check(
- room_version_obj: RoomVersion,
- event: EventBase,
- auth_events: StateMap[EventBase],
- do_sig_check: bool = True,
- do_size_check: bool = True,
+def validate_event_for_room_version(
+ room_version_obj: RoomVersion, event: EventBase
) -> None:
- """Checks if this event is correctly authed.
+ """Ensure that the event complies with the limits, and has the right signatures
+
+ NB: does not *validate* the signatures - it assumes that any signatures present
+ have already been checked.
+
+ NB: it does not check that the event satisfies the auth rules (that is done in
+ check_auth_rules_for_event) - these tests are independent of the rest of the state
+ in the room.
+
+ NB: This is used to check events that have been received over federation. As such,
+ it can only enforce the checks specified in the relevant room version, to avoid
+ a split-brain situation where some servers accept such events, and others reject
+ them.
+
+ TODO: consider moving this into EventValidator
Args:
- room_version_obj: the version of the room
- event: the event being checked.
- auth_events: the existing room state.
- do_sig_check: True if it should be verified that the sending server
- signed the event.
- do_size_check: True if the size of the event fields should be verified.
+ room_version_obj: the version of the room which contains this event
+ event: the event to be checked
Raises:
- AuthError if the checks fail
-
- Returns:
- if the auth checks pass.
+ SynapseError if there is a problem with the event
"""
- assert isinstance(auth_events, dict)
-
- if do_size_check:
- _check_size_limits(event)
+ _check_size_limits(event)
if not hasattr(event, "room_id"):
raise AuthError(500, "Event has no room_id: %s" % event)
- room_id = event.room_id
+ # check that the event has the correct signatures
+ sender_domain = get_domain_from_id(event.sender)
+
+ is_invite_via_3pid = (
+ event.type == EventTypes.Member
+ and event.membership == Membership.INVITE
+ and "third_party_invite" in event.content
+ )
+
+ # Check the sender's domain has signed the event
+ if not event.signatures.get(sender_domain):
+ # We allow invites via 3pid to have a sender from a different
+ # HS, as the sender must match the sender of the original
+ # 3pid invite. This is checked further down with the
+ # other dedicated membership checks.
+ if not is_invite_via_3pid:
+ raise AuthError(403, "Event not signed by sender's server")
+
+ if event.format_version in (EventFormatVersions.V1,):
+ # Only older room versions have event IDs to check.
+ event_id_domain = get_domain_from_id(event.event_id)
+
+ # Check the origin domain has signed the event
+ if not event.signatures.get(event_id_domain):
+ raise AuthError(403, "Event not signed by sending server")
+
+ is_invite_via_allow_rule = (
+ room_version_obj.msc3083_join_rules
+ and event.type == EventTypes.Member
+ and event.membership == Membership.JOIN
+ and "join_authorised_via_users_server" in event.content
+ )
+ if is_invite_via_allow_rule:
+ authoriser_domain = get_domain_from_id(
+ event.content["join_authorised_via_users_server"]
+ )
+ if not event.signatures.get(authoriser_domain):
+ raise AuthError(403, "Event not signed by authorising server")
+
+
+def check_auth_rules_for_event(
+ room_version_obj: RoomVersion, event: EventBase, auth_events: StateMap[EventBase]
+) -> None:
+ """Check that an event complies with the auth rules
+
+ Checks whether an event passes the auth rules with a given set of state events
+
+ Assumes that we have already checked that the event is the right shape (it has
+ enough signatures, has a room ID, etc). In other words:
+
+ - it's fine for use in state resolution, when we have already decided whether to
+ accept the event or not, and are now trying to decide whether it should make it
+ into the room state
+
+ - when we're doing the initial event auth, it is only suitable in combination with
+ a bunch of other tests.
+
+ Args:
+ room_version_obj: the version of the room
+ event: the event being checked.
+ auth_events: the room state to check the events against.
+
+ Raises:
+ AuthError if the checks fail
+ """
+ assert isinstance(auth_events, dict)
# We need to ensure that the auth events are actually for the same room, to
# stop people from using powers they've been granted in other rooms for
# example.
+ #
+ # Arguably we don't need to do this when we're just doing state res, as presumably
+ # the state res algorithm isn't silly enough to give us events from different rooms.
+ # Still, it's easier to do it anyway.
+ room_id = event.room_id
for auth_event in auth_events.values():
if auth_event.room_id != room_id:
raise AuthError(
@@ -86,45 +156,6 @@ def check(
% (event.event_id, room_id, auth_event.event_id, auth_event.room_id),
)
- if do_sig_check:
- sender_domain = get_domain_from_id(event.sender)
-
- is_invite_via_3pid = (
- event.type == EventTypes.Member
- and event.membership == Membership.INVITE
- and "third_party_invite" in event.content
- )
-
- # Check the sender's domain has signed the event
- if not event.signatures.get(sender_domain):
- # We allow invites via 3pid to have a sender from a different
- # HS, as the sender must match the sender of the original
- # 3pid invite. This is checked further down with the
- # other dedicated membership checks.
- if not is_invite_via_3pid:
- raise AuthError(403, "Event not signed by sender's server")
-
- if event.format_version in (EventFormatVersions.V1,):
- # Only older room versions have event IDs to check.
- event_id_domain = get_domain_from_id(event.event_id)
-
- # Check the origin domain has signed the event
- if not event.signatures.get(event_id_domain):
- raise AuthError(403, "Event not signed by sending server")
-
- is_invite_via_allow_rule = (
- room_version_obj.msc3083_join_rules
- and event.type == EventTypes.Member
- and event.membership == Membership.JOIN
- and "join_authorised_via_users_server" in event.content
- )
- if is_invite_via_allow_rule:
- authoriser_domain = get_domain_from_id(
- event.content["join_authorised_via_users_server"]
- )
- if not event.signatures.get(authoriser_domain):
- raise AuthError(403, "Event not signed by authorising server")
-
# Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules
#
# 1. If type is m.room.create:
diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py
index cb81fa0986..d089c56286 100644
--- a/synapse/handlers/event_auth.py
+++ b/synapse/handlers/event_auth.py
@@ -22,7 +22,8 @@ from synapse.api.constants import (
RestrictedJoinRuleTypes,
)
from synapse.api.errors import AuthError, Codes, SynapseError
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
+from synapse.api.room_versions import RoomVersion
+from synapse.event_auth import check_auth_rules_for_event
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext
@@ -45,21 +46,17 @@ class EventAuthHandler:
self._store = hs.get_datastore()
self._server_name = hs.hostname
- async def check_from_context(
+ async def check_auth_rules_from_context(
self,
- room_version: str,
+ room_version_obj: RoomVersion,
event: EventBase,
context: EventContext,
- do_sig_check: bool = True,
) -> None:
+ """Check an event passes the auth rules at its own auth events"""
auth_event_ids = event.auth_event_ids()
auth_events_by_id = await self._store.get_events(auth_event_ids)
auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()}
-
- room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
- event_auth.check(
- room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
- )
+ check_auth_rules_for_event(room_version_obj, event, auth_events)
def compute_auth_events(
self,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 3b0b895b07..0a10a5c28a 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -40,6 +40,10 @@ from synapse.api.errors import (
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
from synapse.crypto.event_signing import compute_event_signature
+from synapse.event_auth import (
+ check_auth_rules_for_event,
+ validate_event_for_room_version,
+)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.validator import EventValidator
@@ -742,10 +746,9 @@ class FederationHandler(BaseHandler):
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
- await self._event_auth_handler.check_from_context(
- room_version.identifier, event, context, do_sig_check=False
+ await self._event_auth_handler.check_auth_rules_from_context(
+ room_version, event, context
)
-
return event
async def on_invite_request(
@@ -916,8 +919,8 @@ class FederationHandler(BaseHandler):
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_leave_request`
- await self._event_auth_handler.check_from_context(
- room_version_obj.identifier, event, context, do_sig_check=False
+ await self._event_auth_handler.check_auth_rules_from_context(
+ room_version_obj, event, context
)
except AuthError as e:
logger.warning("Failed to create new leave %r because %s", event, e)
@@ -978,8 +981,8 @@ class FederationHandler(BaseHandler):
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_knock_request`
- await self._event_auth_handler.check_from_context(
- room_version_obj.identifier, event, context, do_sig_check=False
+ await self._event_auth_handler.check_auth_rules_from_context(
+ room_version_obj, event, context
)
except AuthError as e:
logger.warning("Failed to create new knock %r because %s", event, e)
@@ -1168,7 +1171,8 @@ class FederationHandler(BaseHandler):
auth_for_e[(EventTypes.Create, "")] = create_event
try:
- event_auth.check(room_version, e, auth_events=auth_for_e)
+ validate_event_for_room_version(room_version, e)
+ check_auth_rules_for_event(room_version, e, auth_for_e)
except SynapseError as err:
# we may get SynapseErrors here as well as AuthErrors. For
# instance, there are a couple of (ancient) events in some
@@ -1266,8 +1270,9 @@ class FederationHandler(BaseHandler):
event.internal_metadata.send_on_behalf_of = self.hs.hostname
try:
- await self._event_auth_handler.check_from_context(
- room_version_obj.identifier, event, context
+ validate_event_for_room_version(room_version_obj, event)
+ await self._event_auth_handler.check_auth_rules_from_context(
+ room_version_obj, event, context
)
except AuthError as e:
logger.warning("Denying new third party invite %r because %s", event, e)
@@ -1317,8 +1322,9 @@ class FederationHandler(BaseHandler):
)
try:
- await self._event_auth_handler.check_from_context(
- room_version_obj.identifier, event, context
+ validate_event_for_room_version(room_version_obj, event)
+ await self._event_auth_handler.check_auth_rules_from_context(
+ room_version_obj, event, context
)
except AuthError as e:
logger.warning("Denying third party invite %r because %s", event, e)
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 2c4644b4a3..e587b5b3b3 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -29,7 +29,6 @@ from typing import (
from prometheus_client import Counter
-from synapse import event_auth
from synapse.api.constants import (
EventContentFields,
EventTypes,
@@ -47,7 +46,11 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
-from synapse.event_auth import auth_types_for_event
+from synapse.event_auth import (
+ auth_types_for_event,
+ check_auth_rules_for_event,
+ validate_event_for_room_version,
+)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_client import InvalidResponseError
@@ -1207,7 +1210,8 @@ class FederationEventHandler:
context = EventContext.for_outlier()
try:
- event_auth.check(room_version_obj, event, auth_events=auth)
+ validate_event_for_room_version(room_version_obj, event)
+ check_auth_rules_for_event(room_version_obj, event, auth)
except AuthError as e:
logger.warning("Rejecting %r because %s", event, e)
context.rejected = RejectedReason.AUTH_ERROR
@@ -1282,7 +1286,8 @@ class FederationEventHandler:
auth_events_for_auth = calculated_auth_event_map
try:
- event_auth.check(room_version_obj, event, auth_events=auth_events_for_auth)
+ validate_event_for_room_version(room_version_obj, event)
+ check_auth_rules_for_event(room_version_obj, event, auth_events_for_auth)
except AuthError as e:
logger.warning("Failed auth resolution for %r because %s", event, e)
context.rejected = RejectedReason.AUTH_ERROR
@@ -1394,7 +1399,10 @@ class FederationEventHandler:
}
try:
- event_auth.check(room_version_obj, event, auth_events=current_auth_events)
+ # TODO: skip the call to validate_event_for_room_version? we should already
+ # have validated the event.
+ validate_event_for_room_version(room_version_obj, event)
+ check_auth_rules_for_event(room_version_obj, event, current_auth_events)
except AuthError as e:
logger.warning(
"Soft-failing %r (from %s) because %s",
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 3b8cc50ec0..cdac53037c 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -44,6 +44,7 @@ from synapse.api.errors import (
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.api.urls import ConsentURIBuilder
+from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext
@@ -1098,8 +1099,9 @@ class EventCreationHandler:
assert event.content["membership"] == Membership.LEAVE
else:
try:
- await self._event_auth_handler.check_from_context(
- room_version_obj.identifier, event, context
+ validate_event_for_room_version(room_version_obj, event)
+ await self._event_auth_handler.check_auth_rules_from_context(
+ room_version_obj, event, context
)
except AuthError as err:
logger.warning("Denying new event %r because %s", event, err)
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index bf8a85f563..873e08258e 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -52,6 +52,7 @@ from synapse.api.errors import (
)
from synapse.api.filtering import Filter
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
+from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_power_levels_contents
from synapse.rest.admin._base import assert_user_is_admin
@@ -238,8 +239,9 @@ class RoomCreationHandler(BaseHandler):
},
)
old_room_version = await self.store.get_room_version(old_room_id)
- await self._event_auth_handler.check_from_context(
- old_room_version.identifier, tombstone_event, tombstone_context
+ validate_event_for_room_version(old_room_version, tombstone_event)
+ await self._event_auth_handler.check_auth_rules_from_context(
+ old_room_version, tombstone_event, tombstone_context
)
await self.clone_existing_room(
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index 92336d7cc8..017e6fd92d 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -329,12 +329,10 @@ def _resolve_auth_events(
auth_events[(prev_event.type, prev_event.state_key)] = prev_event
try:
# The signatures have already been checked at this point
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V1,
event,
auth_events,
- do_sig_check=False,
- do_size_check=False,
)
prev_event = event
except AuthError:
@@ -349,12 +347,10 @@ def _resolve_normal_events(
for event in _ordered_events(events):
try:
# The signatures have already been checked at this point
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V1,
event,
auth_events,
- do_sig_check=False,
- do_size_check=False,
)
return event
except AuthError:
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 7b1e8361de..586b0e12fe 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -546,12 +546,10 @@ async def _iterative_auth_checks(
auth_events[key] = event_map[ev_id]
try:
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
room_version,
event,
auth_events,
- do_sig_check=False,
- do_size_check=False,
)
resolved_state[(event.type, event.state_key)] = event_id
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index 6ebd01bcbe..e7a7d00883 100644
--- a/tests/test_event_auth.py
+++ b/tests/test_event_auth.py
@@ -37,21 +37,19 @@ class EventAuthTestCase(unittest.TestCase):
}
# creator should be able to send state
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V1,
_random_state_event(creator),
auth_events,
- do_sig_check=False,
)
# joiner should not be able to send state
self.assertRaises(
AuthError,
- event_auth.check,
+ event_auth.check_auth_rules_for_event,
RoomVersions.V1,
_random_state_event(joiner),
auth_events,
- do_sig_check=False,
)
def test_state_default_level(self):
@@ -76,19 +74,17 @@ class EventAuthTestCase(unittest.TestCase):
# pleb should not be able to send state
self.assertRaises(
AuthError,
- event_auth.check,
+ event_auth.check_auth_rules_for_event,
RoomVersions.V1,
_random_state_event(pleb),
auth_events,
- do_sig_check=False,
),
# king should be able to send state
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V1,
_random_state_event(king),
auth_events,
- do_sig_check=False,
)
def test_alias_event(self):
@@ -101,37 +97,33 @@ class EventAuthTestCase(unittest.TestCase):
}
# creator should be able to send aliases
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V1,
_alias_event(creator),
auth_events,
- do_sig_check=False,
)
# Reject an event with no state key.
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V1,
_alias_event(creator, state_key=""),
auth_events,
- do_sig_check=False,
)
# If the domain of the sender does not match the state key, reject.
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V1,
_alias_event(creator, state_key="test.com"),
auth_events,
- do_sig_check=False,
)
# Note that the member does *not* need to be in the room.
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V1,
_alias_event(other),
auth_events,
- do_sig_check=False,
)
def test_msc2432_alias_event(self):
@@ -144,34 +136,30 @@ class EventAuthTestCase(unittest.TestCase):
}
# creator should be able to send aliases
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_alias_event(creator),
auth_events,
- do_sig_check=False,
)
# No particular checks are done on the state key.
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_alias_event(creator, state_key=""),
auth_events,
- do_sig_check=False,
)
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_alias_event(creator, state_key="test.com"),
auth_events,
- do_sig_check=False,
)
# Per standard auth rules, the member must be in the room.
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_alias_event(other),
auth_events,
- do_sig_check=False,
)
def test_msc2209(self):
@@ -191,20 +179,18 @@ class EventAuthTestCase(unittest.TestCase):
}
# pleb should be able to modify the notifications power level.
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V1,
_power_levels_event(pleb, {"notifications": {"room": 100}}),
auth_events,
- do_sig_check=False,
)
# But an MSC2209 room rejects this change.
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_power_levels_event(pleb, {"notifications": {"room": 100}}),
auth_events,
- do_sig_check=False,
)
def test_join_rules_public(self):
@@ -221,59 +207,53 @@ class EventAuthTestCase(unittest.TestCase):
}
# Check join.
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
# A user cannot be force-joined to a room.
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_member_event(pleb, "join", sender=creator),
auth_events,
- do_sig_check=False,
)
# Banned should be rejected.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
# A user who left can re-join.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
# A user can send a join if they're in the room.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
# A user can accept an invite.
auth_events[("m.room.member", pleb)] = _member_event(
pleb, "invite", sender=creator
)
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
def test_join_rules_invite(self):
@@ -291,60 +271,54 @@ class EventAuthTestCase(unittest.TestCase):
# A join without an invite is rejected.
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
# A user cannot be force-joined to a room.
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_member_event(pleb, "join", sender=creator),
auth_events,
- do_sig_check=False,
)
# Banned should be rejected.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
# A user who left cannot re-join.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
# A user can send a join if they're in the room.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
# A user can accept an invite.
auth_events[("m.room.member", pleb)] = _member_event(
pleb, "invite", sender=creator
)
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
def test_join_rules_msc3083_restricted(self):
@@ -369,11 +343,10 @@ class EventAuthTestCase(unittest.TestCase):
# Older room versions don't understand this join rule
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
# A properly formatted join event should work.
@@ -383,11 +356,10 @@ class EventAuthTestCase(unittest.TestCase):
"join_authorised_via_users_server": "@creator:example.com"
},
)
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V8,
authorised_join_event,
auth_events,
- do_sig_check=False,
)
# A join issued by a specific user works (i.e. the power level checks
@@ -399,7 +371,7 @@ class EventAuthTestCase(unittest.TestCase):
pl_auth_events[("m.room.member", "@inviter:foo.test")] = _join_event(
"@inviter:foo.test"
)
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V8,
_join_event(
pleb,
@@ -408,16 +380,14 @@ class EventAuthTestCase(unittest.TestCase):
},
),
pl_auth_events,
- do_sig_check=False,
)
# A join which is missing an authorised server is rejected.
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V8,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
# A join authorised by a user who is not in the room is rejected.
@@ -426,7 +396,7 @@ class EventAuthTestCase(unittest.TestCase):
creator, {"invite": 100, "users": {"@other:example.com": 150}}
)
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V8,
_join_event(
pleb,
@@ -435,13 +405,12 @@ class EventAuthTestCase(unittest.TestCase):
},
),
auth_events,
- do_sig_check=False,
)
# A user cannot be force-joined to a room. (This uses an event which
# *would* be valid, but is sent by a different user.)
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V8,
_member_event(
pleb,
@@ -452,36 +421,32 @@ class EventAuthTestCase(unittest.TestCase):
},
),
auth_events,
- do_sig_check=False,
)
# Banned should be rejected.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
with self.assertRaises(AuthError):
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V8,
authorised_join_event,
auth_events,
- do_sig_check=False,
)
# A user who left can re-join.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V8,
authorised_join_event,
auth_events,
- do_sig_check=False,
)
# A user can send a join if they're in the room. (This doesn't need to
# be authorised since the user is already joined.)
auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V8,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
# A user can accept an invite. (This doesn't need to be authorised since
@@ -489,11 +454,10 @@ class EventAuthTestCase(unittest.TestCase):
auth_events[("m.room.member", pleb)] = _member_event(
pleb, "invite", sender=creator
)
- event_auth.check(
+ event_auth.check_auth_rules_for_event(
RoomVersions.V8,
_join_event(pleb),
auth_events,
- do_sig_check=False,
)
--
cgit 1.5.1
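For reference, the call pattern the preceding patch converges on, as a minimal sketch. `_check_event` is a hypothetical wrapper; the two functions are the real `synapse.event_auth` entry points shown in the diff above:

    from synapse.event_auth import (
        check_auth_rules_for_event,
        validate_event_for_room_version,
    )

    def _check_event(room_version_obj, event, auth_events) -> None:
        # Hypothetical wrapper. The old event_auth.check(...) call (with its
        # do_sig_check/do_size_check flags) is split into two explicit steps,
        # both of which raise AuthError on failure; callers then decide
        # whether to reject, soft-fail, or re-raise.
        validate_event_for_room_version(room_version_obj, event)
        check_auth_rules_for_event(room_version_obj, event, auth_events)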
From 3aefc7b66d9c7fb98addc71eaf5ef501a4c6a583 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 30 Sep 2021 11:04:40 +0100
Subject: Refactor user directory tests (#10935)
* Pull out GetUserDirectoryTables helper
* Don't rebuild the dir in tests that don't need it
In #10796 I changed user registration so that it also adds user directory
entries. This means we don't have to force a directory rebuild in tests of
the user directory search.
* Move test_initial to tests/storage
* Add type hints to both test_user_directory files
Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
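In rough outline, tests now obtain the table contents through the shared helper rather than through per-class private methods. A condensed, hypothetical test case (the helper and its method names come from the diff below):

    from twisted.test.proto_helpers import MemoryReactor

    from synapse.server import HomeServer
    from synapse.util import Clock

    from tests.storage.test_user_directory import GetUserDirectoryTables
    from tests.unittest import HomeserverTestCase

    class ExampleUserDirTestCase(HomeserverTestCase):
        def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
            self.store = hs.get_datastore()
            # Shared helper wrapping the user directory tables.
            self.user_dir_helper = GetUserDirectoryTables(self.store)

        def test_tables_start_empty(self) -> None:
            # The helper methods are coroutines, hence get_success().
            shares_private = self.get_success(
                self.user_dir_helper.get_users_who_share_private_rooms()
            )
            public_users = self.get_success(
                self.user_dir_helper.get_users_in_public_rooms()
            )
            self.assertEqual(public_users, [])
            self.assertEqual(
                self.user_dir_helper._compress_shared(shares_private), set()
            )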
---
changelog.d/10935.misc | 1 +
mypy.ini | 6 +
synapse/storage/databases/main/user_directory.py | 2 +-
tests/handlers/test_user_directory.py | 283 +++++++----------------
tests/storage/test_user_directory.py | 192 ++++++++++++++-
tests/unittest.py | 4 +-
6 files changed, 288 insertions(+), 200 deletions(-)
create mode 100644 changelog.d/10935.misc
diff --git a/changelog.d/10935.misc b/changelog.d/10935.misc
new file mode 100644
index 0000000000..80529c04ca
--- /dev/null
+++ b/changelog.d/10935.misc
@@ -0,0 +1 @@
+Refactor user directory tests in preparation for upcoming changes.
diff --git a/mypy.ini b/mypy.ini
index 437d0a46a5..568166db33 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -162,6 +162,12 @@ disallow_untyped_defs = True
[mypy-synapse.util.wheel_timer]
disallow_untyped_defs = True
+[mypy-tests.handlers.test_user_directory]
+disallow_untyped_defs = True
+
+[mypy-tests.storage.test_user_directory]
+disallow_untyped_defs = True
+
[mypy-pymacaroons.*]
ignore_missing_imports = True
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 90d65edc42..c26e3e066f 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -527,7 +527,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
desc="get_user_in_directory",
)
- async def update_user_directory_stream_pos(self, stream_id: int) -> None:
+ async def update_user_directory_stream_pos(self, stream_id: Optional[int]) -> None:
await self.db_pool.simple_update_one(
table="user_directory_stream_pos",
keyvalues={},
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 266333c553..2988befb21 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -11,26 +11,37 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List, Tuple
-from unittest.mock import Mock
+from unittest.mock import Mock, patch
from urllib.parse import quote
from twisted.internet import defer
+from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import UserTypes
from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.rest.client import login, room, user_directory
+from synapse.server import HomeServer
from synapse.storage.roommember import ProfileInfo
from synapse.types import create_requester
+from synapse.util import Clock
from tests import unittest
+from tests.storage.test_user_directory import GetUserDirectoryTables
from tests.unittest import override_config
class UserDirectoryTestCase(unittest.HomeserverTestCase):
- """
- Tests the UserDirectoryHandler.
+ """Tests the UserDirectoryHandler.
+
+ We're broadly testing two kinds of things here.
+
+ 1. Check that we correctly update the user directory in response
+ to events (e.g. join a room, leave a room, change name, make public)
+ 2. Check that the search logic behaves as expected.
+
+ The background process that rebuilds the user directory is tested in
+ tests/storage/test_user_directory.py.
"""
servlets = [
@@ -39,19 +50,19 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
room.register_servlets,
]
- def make_homeserver(self, reactor, clock):
-
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
config = self.default_config()
config["update_user_directory"] = True
return self.setup_test_homeserver(config=config)
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastore()
self.handler = hs.get_user_directory_handler()
self.event_builder_factory = self.hs.get_event_builder_factory()
self.event_creation_handler = self.hs.get_event_creation_handler()
+ self.user_dir_helper = GetUserDirectoryTables(self.store)
- def test_handle_local_profile_change_with_support_user(self):
+ def test_handle_local_profile_change_with_support_user(self) -> None:
support_user_id = "@support:test"
self.get_success(
self.store.register_user(
@@ -64,7 +75,9 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
)
self.get_success(
- self.handler.handle_local_profile_change(support_user_id, None)
+ self.handler.handle_local_profile_change(
+ support_user_id, ProfileInfo("I love support me", None)
+ )
)
profile = self.get_success(self.store.get_user_in_directory(support_user_id))
self.assertTrue(profile is None)
@@ -77,7 +90,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
profile = self.get_success(self.store.get_user_in_directory(regular_user_id))
self.assertTrue(profile["display_name"] == display_name)
- def test_handle_local_profile_change_with_deactivated_user(self):
+ def test_handle_local_profile_change_with_deactivated_user(self) -> None:
# create user
r_user_id = "@regular:test"
self.get_success(
@@ -112,7 +125,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
profile = self.get_success(self.store.get_user_in_directory(r_user_id))
self.assertTrue(profile is None)
- def test_handle_user_deactivated_support_user(self):
+ def test_handle_user_deactivated_support_user(self) -> None:
s_user_id = "@support:test"
self.get_success(
self.store.register_user(
@@ -120,20 +133,29 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
)
)
- self.store.remove_from_user_dir = Mock(return_value=defer.succeed(None))
- self.get_success(self.handler.handle_local_user_deactivated(s_user_id))
- self.store.remove_from_user_dir.not_called()
+ mock_remove_from_user_dir = Mock(return_value=defer.succeed(None))
+ with patch.object(
+ self.store, "remove_from_user_dir", mock_remove_from_user_dir
+ ):
+ self.get_success(self.handler.handle_local_user_deactivated(s_user_id))
+ # BUG: the correct spelling is assert_not_called, but that makes the test fail
+ # and it's not clear that this is actually the behaviour we want.
+ mock_remove_from_user_dir.not_called()
- def test_handle_user_deactivated_regular_user(self):
+ def test_handle_user_deactivated_regular_user(self) -> None:
r_user_id = "@regular:test"
self.get_success(
self.store.register_user(user_id=r_user_id, password_hash=None)
)
- self.store.remove_from_user_dir = Mock(return_value=defer.succeed(None))
- self.get_success(self.handler.handle_local_user_deactivated(r_user_id))
- self.store.remove_from_user_dir.called_once_with(r_user_id)
- def test_reactivation_makes_regular_user_searchable(self):
+ mock_remove_from_user_dir = Mock(return_value=defer.succeed(None))
+ with patch.object(
+ self.store, "remove_from_user_dir", mock_remove_from_user_dir
+ ):
+ self.get_success(self.handler.handle_local_user_deactivated(r_user_id))
+ mock_remove_from_user_dir.assert_called_once_with(r_user_id)
+
+ def test_reactivation_makes_regular_user_searchable(self) -> None:
user = self.register_user("regular", "pass")
user_token = self.login(user, "pass")
admin_user = self.register_user("admin", "pass", admin=True)
@@ -171,7 +193,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.assertEqual(len(s["results"]), 1)
self.assertEqual(s["results"][0]["user_id"], user)
- def test_private_room(self):
+ def test_private_room(self) -> None:
"""
A user can be searched for only by people that are either in a public
room, or that share a private chat.
@@ -191,11 +213,16 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.helper.join(room, user=u2, tok=u2_token)
# Check we have populated the database correctly.
- shares_private = self.get_users_who_share_private_rooms()
- public_users = self.get_users_in_public_rooms()
+ shares_private = self.get_success(
+ self.user_dir_helper.get_users_who_share_private_rooms()
+ )
+ public_users = self.get_success(
+ self.user_dir_helper.get_users_in_public_rooms()
+ )
self.assertEqual(
- self._compress_shared(shares_private), {(u1, u2, room), (u2, u1, room)}
+ self.user_dir_helper._compress_shared(shares_private),
+ {(u1, u2, room), (u2, u1, room)},
)
self.assertEqual(public_users, [])
@@ -215,10 +242,14 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.helper.leave(room, user=u2, tok=u2_token)
# Check we have removed the values.
- shares_private = self.get_users_who_share_private_rooms()
- public_users = self.get_users_in_public_rooms()
+ shares_private = self.get_success(
+ self.user_dir_helper.get_users_who_share_private_rooms()
+ )
+ public_users = self.get_success(
+ self.user_dir_helper.get_users_in_public_rooms()
+ )
- self.assertEqual(self._compress_shared(shares_private), set())
+ self.assertEqual(self.user_dir_helper._compress_shared(shares_private), set())
self.assertEqual(public_users, [])
# User1 now gets no search results for any of the other users.
@@ -228,7 +259,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
s = self.get_success(self.handler.search_users(u1, "user3", 10))
self.assertEqual(len(s["results"]), 0)
- def test_spam_checker(self):
+ def test_spam_checker(self) -> None:
"""
A user which fails the spam checks will not appear in search results.
"""
@@ -246,11 +277,16 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.helper.join(room, user=u2, tok=u2_token)
# Check we have populated the database correctly.
- shares_private = self.get_users_who_share_private_rooms()
- public_users = self.get_users_in_public_rooms()
+ shares_private = self.get_success(
+ self.user_dir_helper.get_users_who_share_private_rooms()
+ )
+ public_users = self.get_success(
+ self.user_dir_helper.get_users_in_public_rooms()
+ )
self.assertEqual(
- self._compress_shared(shares_private), {(u1, u2, room), (u2, u1, room)}
+ self.user_dir_helper._compress_shared(shares_private),
+ {(u1, u2, room), (u2, u1, room)},
)
self.assertEqual(public_users, [])
@@ -258,7 +294,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
s = self.get_success(self.handler.search_users(u1, "user2", 10))
self.assertEqual(len(s["results"]), 1)
- async def allow_all(user_profile):
+ async def allow_all(user_profile: ProfileInfo) -> bool:
# Allow all users.
return False
@@ -272,7 +308,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.assertEqual(len(s["results"]), 1)
# Configure a spam checker that filters all users.
- async def block_all(user_profile):
+ async def block_all(user_profile: ProfileInfo) -> bool:
# All users are spammy.
return True
@@ -282,7 +318,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
s = self.get_success(self.handler.search_users(u1, "user2", 10))
self.assertEqual(len(s["results"]), 0)
- def test_legacy_spam_checker(self):
+ def test_legacy_spam_checker(self) -> None:
"""
A spam checker without the expected method should be ignored.
"""
@@ -300,11 +336,16 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.helper.join(room, user=u2, tok=u2_token)
# Check we have populated the database correctly.
- shares_private = self.get_users_who_share_private_rooms()
- public_users = self.get_users_in_public_rooms()
+ shares_private = self.get_success(
+ self.user_dir_helper.get_users_who_share_private_rooms()
+ )
+ public_users = self.get_success(
+ self.user_dir_helper.get_users_in_public_rooms()
+ )
self.assertEqual(
- self._compress_shared(shares_private), {(u1, u2, room), (u2, u1, room)}
+ self.user_dir_helper._compress_shared(shares_private),
+ {(u1, u2, room), (u2, u1, room)},
)
self.assertEqual(public_users, [])
@@ -317,134 +358,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
s = self.get_success(self.handler.search_users(u1, "user2", 10))
self.assertEqual(len(s["results"]), 1)
- def _compress_shared(self, shared):
- """
- Compress a list of users who share rooms dicts to a list of tuples.
- """
- r = set()
- for i in shared:
- r.add((i["user_id"], i["other_user_id"], i["room_id"]))
- return r
-
- def get_users_in_public_rooms(self) -> List[Tuple[str, str]]:
- r = self.get_success(
- self.store.db_pool.simple_select_list(
- "users_in_public_rooms", None, ("user_id", "room_id")
- )
- )
- retval = []
- for i in r:
- retval.append((i["user_id"], i["room_id"]))
- return retval
-
- def get_users_who_share_private_rooms(self) -> List[Tuple[str, str, str]]:
- return self.get_success(
- self.store.db_pool.simple_select_list(
- "users_who_share_private_rooms",
- None,
- ["user_id", "other_user_id", "room_id"],
- )
- )
-
- def _add_background_updates(self):
- """
- Add the background updates we need to run.
- """
- # Ugh, have to reset this flag
- self.store.db_pool.updates._all_done = False
-
- self.get_success(
- self.store.db_pool.simple_insert(
- "background_updates",
- {
- "update_name": "populate_user_directory_createtables",
- "progress_json": "{}",
- },
- )
- )
- self.get_success(
- self.store.db_pool.simple_insert(
- "background_updates",
- {
- "update_name": "populate_user_directory_process_rooms",
- "progress_json": "{}",
- "depends_on": "populate_user_directory_createtables",
- },
- )
- )
- self.get_success(
- self.store.db_pool.simple_insert(
- "background_updates",
- {
- "update_name": "populate_user_directory_process_users",
- "progress_json": "{}",
- "depends_on": "populate_user_directory_process_rooms",
- },
- )
- )
- self.get_success(
- self.store.db_pool.simple_insert(
- "background_updates",
- {
- "update_name": "populate_user_directory_cleanup",
- "progress_json": "{}",
- "depends_on": "populate_user_directory_process_users",
- },
- )
- )
-
- def test_initial(self):
- """
- The user directory's initial handler correctly updates the search tables.
- """
- u1 = self.register_user("user1", "pass")
- u1_token = self.login(u1, "pass")
- u2 = self.register_user("user2", "pass")
- u2_token = self.login(u2, "pass")
- u3 = self.register_user("user3", "pass")
- u3_token = self.login(u3, "pass")
-
- room = self.helper.create_room_as(u1, is_public=True, tok=u1_token)
- self.helper.invite(room, src=u1, targ=u2, tok=u1_token)
- self.helper.join(room, user=u2, tok=u2_token)
-
- private_room = self.helper.create_room_as(u1, is_public=False, tok=u1_token)
- self.helper.invite(private_room, src=u1, targ=u3, tok=u1_token)
- self.helper.join(private_room, user=u3, tok=u3_token)
-
- self.get_success(self.store.update_user_directory_stream_pos(None))
- self.get_success(self.store.delete_all_from_user_dir())
-
- shares_private = self.get_users_who_share_private_rooms()
- public_users = self.get_users_in_public_rooms()
-
- # Nothing updated yet
- self.assertEqual(shares_private, [])
- self.assertEqual(public_users, [])
-
- # Do the initial population of the user directory via the background update
- self._add_background_updates()
-
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
-
- shares_private = self.get_users_who_share_private_rooms()
- public_users = self.get_users_in_public_rooms()
-
- # User 1 and User 2 are in the same public room
- self.assertEqual(set(public_users), {(u1, room), (u2, room)})
-
- # User 1 and User 3 share private rooms
- self.assertEqual(
- self._compress_shared(shares_private),
- {(u1, u3, private_room), (u3, u1, private_room)},
- )
-
- def test_initial_share_all_users(self):
+ def test_initial_share_all_users(self) -> None:
"""
Search all users = True means that a user does not have to share a
private room with the searching user or be in a public room to be search
@@ -457,26 +371,16 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.register_user("user2", "pass")
u3 = self.register_user("user3", "pass")
- # Wipe the user dir
- self.get_success(self.store.update_user_directory_stream_pos(None))
- self.get_success(self.store.delete_all_from_user_dir())
-
- # Do the initial population of the user directory via the background update
- self._add_background_updates()
-
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
-
- shares_private = self.get_users_who_share_private_rooms()
- public_users = self.get_users_in_public_rooms()
+ shares_private = self.get_success(
+ self.user_dir_helper.get_users_who_share_private_rooms()
+ )
+ public_users = self.get_success(
+ self.user_dir_helper.get_users_in_public_rooms()
+ )
# No users share rooms
self.assertEqual(public_users, [])
- self.assertEqual(self._compress_shared(shares_private), set())
+ self.assertEqual(self.user_dir_helper._compress_shared(shares_private), set())
# Despite not sharing a room, search_all_users means we get a search
# result.
@@ -501,7 +405,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
}
}
)
- def test_prefer_local_users(self):
+ def test_prefer_local_users(self) -> None:
"""Tests that local users are shown higher in search results when
user_directory.prefer_local_users is True.
"""
@@ -535,15 +439,6 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
local_users = [local_user_1, local_user_2, local_user_3]
remote_users = [remote_user_1, remote_user_2, remote_user_3]
- # Populate the user directory via background update
- self._add_background_updates()
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
-
# The local searching user searches for the term "user", which other users have
# in their user id
results = self.get_success(
@@ -565,7 +460,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
room_id: str,
room_version: RoomVersion,
user_id: str,
- ):
+ ) -> None:
# Add a user to the room.
builder = self.event_builder_factory.for_room_version(
room_version,
@@ -597,7 +492,7 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
synapse.rest.admin.register_servlets_for_client_rest_resource,
]
- def make_homeserver(self, reactor, clock):
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
config = self.default_config()
config["update_user_directory"] = True
hs = self.setup_test_homeserver(config=config)
@@ -606,7 +501,7 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
return hs
- def test_disabling_room_list(self):
+ def test_disabling_room_list(self) -> None:
self.config.userdirectory.user_directory_search_enabled = True
# First we create a room with another user so that user dir is non-empty
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index 222e5d129d..74c8a8599e 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -11,6 +11,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Dict, List, Set, Tuple
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage import DataStore
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase, override_config
@@ -21,8 +30,183 @@ BOBBY = "@bobby:a"
BELA = "@somenickname:a"
+class GetUserDirectoryTables:
+ """Helper functions that we want to reuse in tests/handlers/test_user_directory.py"""
+
+ def __init__(self, store: DataStore):
+ self.store = store
+
+ def _compress_shared(
+ self, shared: List[Dict[str, str]]
+ ) -> Set[Tuple[str, str, str]]:
+ """
+ Compress a list of users who share rooms dicts to a list of tuples.
+ """
+ r = set()
+ for i in shared:
+ r.add((i["user_id"], i["other_user_id"], i["room_id"]))
+ return r
+
+ async def get_users_in_public_rooms(self) -> List[Tuple[str, str]]:
+ r = await self.store.db_pool.simple_select_list(
+ "users_in_public_rooms", None, ("user_id", "room_id")
+ )
+
+ retval = []
+ for i in r:
+ retval.append((i["user_id"], i["room_id"]))
+ return retval
+
+ async def get_users_who_share_private_rooms(self) -> List[Dict[str, str]]:
+ return await self.store.db_pool.simple_select_list(
+ "users_who_share_private_rooms",
+ None,
+ ["user_id", "other_user_id", "room_id"],
+ )
+
+
+class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
+ """Ensure that rebuilding the directory writes the correct data to the DB.
+
+ See also tests/handlers/test_user_directory.py for similar checks. They
+ test the incremental updates, rather than the big rebuild.
+ """
+
+ servlets = [
+ login.register_servlets,
+ admin.register_servlets_for_client_rest_resource,
+ room.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastore()
+ self.user_dir_helper = GetUserDirectoryTables(self.store)
+
+ def _purge_and_rebuild_user_dir(self) -> None:
+ """Nuke the user directory tables, start the background process to
+ repopulate them, and wait for the process to complete. This allows us
+ to inspect the outcome of the background process alone, without any of
+ the other incremental updates.
+ """
+ self.get_success(self.store.update_user_directory_stream_pos(None))
+ self.get_success(self.store.delete_all_from_user_dir())
+
+ shares_private = self.get_success(
+ self.user_dir_helper.get_users_who_share_private_rooms()
+ )
+ public_users = self.get_success(
+ self.user_dir_helper.get_users_in_public_rooms()
+ )
+
+ # Nothing updated yet
+ self.assertEqual(shares_private, [])
+ self.assertEqual(public_users, [])
+
+ # Ugh, have to reset this flag
+ self.store.db_pool.updates._all_done = False
+
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": "populate_user_directory_createtables",
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": "populate_user_directory_process_rooms",
+ "progress_json": "{}",
+ "depends_on": "populate_user_directory_createtables",
+ },
+ )
+ )
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": "populate_user_directory_process_users",
+ "progress_json": "{}",
+ "depends_on": "populate_user_directory_process_rooms",
+ },
+ )
+ )
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": "populate_user_directory_cleanup",
+ "progress_json": "{}",
+ "depends_on": "populate_user_directory_process_users",
+ },
+ )
+ )
+
+ while not self.get_success(
+ self.store.db_pool.updates.has_completed_background_updates()
+ ):
+ self.get_success(
+ self.store.db_pool.updates.do_next_background_update(100), by=0.1
+ )
+
+ def test_initial(self) -> None:
+ """
+ The user directory's initial handler correctly updates the search tables.
+ """
+ u1 = self.register_user("user1", "pass")
+ u1_token = self.login(u1, "pass")
+ u2 = self.register_user("user2", "pass")
+ u2_token = self.login(u2, "pass")
+ u3 = self.register_user("user3", "pass")
+ u3_token = self.login(u3, "pass")
+
+ room = self.helper.create_room_as(u1, is_public=True, tok=u1_token)
+ self.helper.invite(room, src=u1, targ=u2, tok=u1_token)
+ self.helper.join(room, user=u2, tok=u2_token)
+
+ private_room = self.helper.create_room_as(u1, is_public=False, tok=u1_token)
+ self.helper.invite(private_room, src=u1, targ=u3, tok=u1_token)
+ self.helper.join(private_room, user=u3, tok=u3_token)
+
+ self.get_success(self.store.update_user_directory_stream_pos(None))
+ self.get_success(self.store.delete_all_from_user_dir())
+
+ shares_private = self.get_success(
+ self.user_dir_helper.get_users_who_share_private_rooms()
+ )
+ public_users = self.get_success(
+ self.user_dir_helper.get_users_in_public_rooms()
+ )
+
+ # Nothing updated yet
+ self.assertEqual(shares_private, [])
+ self.assertEqual(public_users, [])
+
+ # Do the initial population of the user directory via the background update
+ self._purge_and_rebuild_user_dir()
+
+ shares_private = self.get_success(
+ self.user_dir_helper.get_users_who_share_private_rooms()
+ )
+ public_users = self.get_success(
+ self.user_dir_helper.get_users_in_public_rooms()
+ )
+
+ # User 1 and User 2 are in the same public room
+ self.assertEqual(set(public_users), {(u1, room), (u2, room)})
+
+ # User 1 and User 3 share private rooms
+ self.assertEqual(
+ self.user_dir_helper._compress_shared(shares_private),
+ {(u1, u3, private_room), (u3, u1, private_room)},
+ )
+
+
class UserDirectoryStoreTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastore()
# alice and bob are both in !room_id. bobby is not but shares
@@ -33,7 +217,7 @@ class UserDirectoryStoreTestCase(HomeserverTestCase):
self.get_success(self.store.update_profile_in_user_dir(BELA, "Bela", None))
self.get_success(self.store.add_users_in_public_rooms("!room:id", (ALICE, BOB)))
- def test_search_user_dir(self):
+ def test_search_user_dir(self) -> None:
# normally when alice searches the directory she should just find
# bob because bobby doesn't share a room with her.
r = self.get_success(self.store.search_user_dir(ALICE, "bob", 10))
@@ -44,7 +228,7 @@ class UserDirectoryStoreTestCase(HomeserverTestCase):
)
@override_config({"user_directory": {"search_all_users": True}})
- def test_search_user_dir_all_users(self):
+ def test_search_user_dir_all_users(self) -> None:
r = self.get_success(self.store.search_user_dir(ALICE, "bob", 10))
self.assertFalse(r["limited"])
self.assertEqual(2, len(r["results"]))
@@ -58,7 +242,7 @@ class UserDirectoryStoreTestCase(HomeserverTestCase):
)
@override_config({"user_directory": {"search_all_users": True}})
- def test_search_user_dir_stop_words(self):
+ def test_search_user_dir_stop_words(self) -> None:
"""Tests that a user can look up another user by searching for the start if its
display name even if that name happens to be a common English word that would
usually be ignored in full text searches.
diff --git a/tests/unittest.py b/tests/unittest.py
index 6d5d87cb78..5f93ebf147 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -28,6 +28,7 @@ from canonicaljson import json
from twisted.internet.defer import Deferred, ensureDeferred, succeed
from twisted.python.failure import Failure
from twisted.python.threadpool import ThreadPool
+from twisted.test.proto_helpers import MemoryReactor
from twisted.trial import unittest
from twisted.web.resource import Resource
@@ -46,6 +47,7 @@ from synapse.logging.context import (
)
from synapse.server import HomeServer
from synapse.types import UserID, create_requester
+from synapse.util import Clock
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.ratelimitutils import FederationRateLimiter
@@ -371,7 +373,7 @@ class HomeserverTestCase(TestCase):
return config
- def prepare(self, reactor, clock, homeserver):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer):
"""
Prepare for the test. This involves things like mocking out parts of
the homeserver, or building test data common across the whole test
--
cgit 1.5.1
From a03ed5e6ae23e52941e91ecb892a7b5c88964d90 Mon Sep 17 00:00:00 2001
From: reivilibre
Date: Thu, 30 Sep 2021 11:06:47 +0100
Subject: Fix issue causing sending presence to ASes to fail (due to incomplete
type annotations) (#10944)
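A sketch of the widened signature, assembled from the hunk below (assuming the method shown is PresenceEventSource.get_new_events; its body is elided here):

    from typing import Collection, List, Optional, Tuple

    from synapse.api.presence import UserPresenceState
    from synapse.appservice import ApplicationService
    from synapse.types import UserID

    async def get_new_events(
        self,
        user: UserID,
        from_key: Optional[int],
        limit: Optional[int] = None,
        room_ids: Optional[Collection[str]] = None,  # was Optional[List[str]]
        is_guest: bool = False,
        explicit_room_id: Optional[str] = None,
        include_offline: bool = True,
        service: Optional[ApplicationService] = None,  # new parameter
    ) -> Tuple[List[UserPresenceState], int]:
        ...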
---
changelog.d/10944.bugfix | 1 +
synapse/handlers/presence.py | 4 +++-
2 files changed, 4 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/10944.bugfix
diff --git a/changelog.d/10944.bugfix b/changelog.d/10944.bugfix
new file mode 100644
index 0000000000..49baff7df1
--- /dev/null
+++ b/changelog.d/10944.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in v1.44.0rc1 which prevented sending presence events to application services.
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 983c837c66..404afb9402 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -52,6 +52,7 @@ import synapse.metrics
from synapse.api.constants import EventTypes, Membership, PresenceState
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
+from synapse.appservice import ApplicationService
from synapse.events.presence_router import PresenceRouter
from synapse.logging.context import run_in_background
from synapse.logging.utils import log_function
@@ -1521,10 +1522,11 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
user: UserID,
from_key: Optional[int],
limit: Optional[int] = None,
- room_ids: Optional[List[str]] = None,
+ room_ids: Optional[Collection[str]] = None,
is_guest: bool = False,
explicit_room_id: Optional[str] = None,
include_offline: bool = True,
+ service: Optional[ApplicationService] = None,
) -> Tuple[List[UserPresenceState], int]:
# The process for getting presence events are:
# 1. Get the rooms the user is in.
--
cgit 1.5.1
From c4bf48ee6fa4662d88a5bf682e79787851fe9cd8 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 28 Sep 2021 22:00:04 -0500
Subject: Fix event context for outliers in important MSC2716 spot (#10938)
Fix the event context for outliers, which was causing failures in all of the
MSC2716 Complement tests.
The `EventContext.for_outlier` refactor happened in
https://github.com/matrix-org/synapse/pull/10883
and this spot was left out.
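The fix in outline, mirroring the hunk below:

    # Sketch of the corrected branch: outliers get a fixed context via
    # EventContext.for_outlier(); state is only computed for non-outliers,
    # and the auth_event_ids-based old_state path is dropped.
    if builder.internal_metadata.outlier:
        event.internal_metadata.outlier = True
        context = EventContext.for_outlier()
    else:
        context = await self.state.compute_event_context(event)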
---
changelog.d/10938.bugfix | 1 +
synapse/handlers/message.py | 13 ++++---------
2 files changed, 5 insertions(+), 9 deletions(-)
create mode 100644 changelog.d/10938.bugfix
diff --git a/changelog.d/10938.bugfix b/changelog.d/10938.bugfix
new file mode 100644
index 0000000000..9cf0ea8788
--- /dev/null
+++ b/changelog.d/10938.bugfix
@@ -0,0 +1 @@
+Fix bug introduced in Synapse 1.44 which caused the experimental [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint to return a 500 error.
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index c66aefe2c4..fd861e94f8 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -952,18 +952,13 @@ class EventCreationHandler:
depth=depth,
)
- old_state = None
-
# Pass on the outlier property from the builder to the event
# after it is created
if builder.internal_metadata.outlier:
- event.internal_metadata.outlier = builder.internal_metadata.outlier
-
- # Calculate the state for outliers that pass in their own `auth_event_ids`
- if auth_event_ids:
- old_state = await self.store.get_events_as_list(auth_event_ids)
-
- context = await self.state.compute_event_context(event, old_state=old_state)
+ event.internal_metadata.outlier = True
+ context = EventContext.for_outlier()
+ else:
+ context = await self.state.compute_event_context(event)
if requester:
context.app_service = requester.app_service
--
cgit 1.5.1
From 3412f5c8d8c8aff5bcf9b0e5012dfa2f4e895464 Mon Sep 17 00:00:00 2001
From: "Olivier Wilkinson (reivilibre)"
Date: Thu, 30 Sep 2021 12:40:24 +0100
Subject: 1.44.0rc2
---
CHANGES.md | 16 ++++++++++++++++
changelog.d/10919.doc | 1 -
changelog.d/10938.bugfix | 1 -
changelog.d/10944.bugfix | 1 -
debian/changelog | 6 ++++++
synapse/__init__.py | 2 +-
6 files changed, 23 insertions(+), 4 deletions(-)
delete mode 100644 changelog.d/10919.doc
delete mode 100644 changelog.d/10938.bugfix
delete mode 100644 changelog.d/10944.bugfix
diff --git a/CHANGES.md b/CHANGES.md
index 271e2271fb..59ff967633 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,19 @@
+Synapse 1.44.0rc2 (2021-09-30)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.44.0rc1 which caused the experimental [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint to return a 500 error. ([\#10938](https://github.com/matrix-org/synapse/issues/10938))
+- Fix a bug introduced in v1.44.0rc1 which prevented sending presence events to application services. ([\#10944](https://github.com/matrix-org/synapse/issues/10944))
+
+
+Improved Documentation
+----------------------
+
+- Minor updates to the installation instructions. ([\#10919](https://github.com/matrix-org/synapse/issues/10919))
+
+
Synapse 1.44.0rc1 (2021-09-29)
==============================
diff --git a/changelog.d/10919.doc b/changelog.d/10919.doc
deleted file mode 100644
index d0bddc3f1b..0000000000
--- a/changelog.d/10919.doc
+++ /dev/null
@@ -1 +0,0 @@
-Minor updates to the installation instructions.
diff --git a/changelog.d/10938.bugfix b/changelog.d/10938.bugfix
deleted file mode 100644
index 9cf0ea8788..0000000000
--- a/changelog.d/10938.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug introduced in Synapse 1.44 which caused the experimental [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint to return a 500 error.
diff --git a/changelog.d/10944.bugfix b/changelog.d/10944.bugfix
deleted file mode 100644
index 49baff7df1..0000000000
--- a/changelog.d/10944.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in v1.44.0rc1 which prevented sending presence events to application services.
diff --git a/debian/changelog b/debian/changelog
index 191bb97c5e..b08a592780 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.44.0~rc2) stable; urgency=medium
+
+ * New synapse release 1.44.0~rc2.
+
+ -- Synapse Packaging team Thu, 30 Sep 2021 12:39:10 +0100
+
matrix-synapse-py3 (1.44.0~rc1) stable; urgency=medium
* New synapse release 1.44.0~rc1.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index a1fec8ad2b..8791c20e26 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -47,7 +47,7 @@ try:
except ImportError:
pass
-__version__ = "1.44.0rc1"
+__version__ = "1.44.0rc2"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
--
cgit 1.5.1
From 29364145b29e84c5dcab076c4e0d436ebf77e4cd Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 30 Sep 2021 12:51:47 +0100
Subject: Pass str to twisted's IReactorTCP (#10895)
This follows a correction made in twisted/twisted#1664 and should fix our Twisted Trial CI job.
Until that change is in a twisted release, we'll have to ignore the type
of the `host` argument. I've raised #10899 to remind us to review the
issue in a few months' time.
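Concretely, every call site now passes the hostname as a str and carries a narrow ignore until the fixed annotation ships (pattern copied from the hunks below):

    # host is a str; Twisted's IReactorTCP annotation still claims bytes until
    # twisted/twisted#1664 is in a release, hence the targeted ignore.
    reactor.connectTCP(
        host,  # type: ignore[arg-type]
        port,
        factory,
        timeout=30,
        bindAddress=None,
    )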
---
changelog.d/10895.misc | 1 +
synapse/handlers/send_email.py | 9 +++++++--
synapse/replication/tcp/handler.py | 8 ++++++--
synapse/replication/tcp/redis.py | 8 +++++++-
tests/replication/_base.py | 4 ++--
tests/server.py | 8 ++++----
6 files changed, 27 insertions(+), 11 deletions(-)
create mode 100644 changelog.d/10895.misc
diff --git a/changelog.d/10895.misc b/changelog.d/10895.misc
new file mode 100644
index 0000000000..d1c8224980
--- /dev/null
+++ b/changelog.d/10895.misc
@@ -0,0 +1 @@
+Fix type hints to be compatible with an upcoming change to Twisted.
\ No newline at end of file
diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py
index 25e6b012b7..1a062a784c 100644
--- a/synapse/handlers/send_email.py
+++ b/synapse/handlers/send_email.py
@@ -105,8 +105,13 @@ async def _sendmail(
# set to enable TLS.
factory = build_sender_factory(hostname=smtphost if enable_tls else None)
- # the IReactorTCP interface claims host has to be a bytes, which seems to be wrong
- reactor.connectTCP(smtphost, smtpport, factory, timeout=30, bindAddress=None) # type: ignore[arg-type]
+ reactor.connectTCP(
+ smtphost, # type: ignore[arg-type]
+ smtpport,
+ factory,
+ timeout=30,
+ bindAddress=None,
+ )
await make_deferred_yieldable(d)
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index 1438a82b60..d64d1dbacd 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -315,7 +315,7 @@ class ReplicationCommandHandler:
hs, outbound_redis_connection
)
hs.get_reactor().connectTCP(
- hs.config.redis.redis_host.encode(),
+ hs.config.redis.redis_host, # type: ignore[arg-type]
hs.config.redis.redis_port,
self._factory,
)
@@ -324,7 +324,11 @@ class ReplicationCommandHandler:
self._factory = DirectTcpReplicationClientFactory(hs, client_name, self)
host = hs.config.worker.worker_replication_host
port = hs.config.worker.worker_replication_port
- hs.get_reactor().connectTCP(host.encode(), port, self._factory)
+ hs.get_reactor().connectTCP(
+ host, # type: ignore[arg-type]
+ port,
+ self._factory,
+ )
def get_streams(self) -> Dict[str, Stream]:
"""Get a map from stream name to all streams."""
diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py
index 8c0df627c8..062fe2f33e 100644
--- a/synapse/replication/tcp/redis.py
+++ b/synapse/replication/tcp/redis.py
@@ -364,6 +364,12 @@ def lazyConnection(
factory.continueTrying = reconnect
reactor = hs.get_reactor()
- reactor.connectTCP(host.encode(), port, factory, timeout=30, bindAddress=None)
+ reactor.connectTCP(
+ host, # type: ignore[arg-type]
+ port,
+ factory,
+ timeout=30,
+ bindAddress=None,
+ )
return factory.handler
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index c7555c26db..cdd6e3d3c1 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -240,7 +240,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
if self.hs.config.redis.redis_enabled:
# Handle attempts to connect to fake redis server.
self.reactor.add_tcp_client_callback(
- b"localhost",
+ "localhost",
6379,
self.connect_any_redis_attempts,
)
@@ -424,7 +424,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
clients = self.reactor.tcpClients
while clients:
(host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
- self.assertEqual(host, b"localhost")
+ self.assertEqual(host, "localhost")
self.assertEqual(port, 6379)
client_protocol = client_factory.buildProtocol(None)
diff --git a/tests/server.py b/tests/server.py
index 88dfa8058e..64645651ce 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -317,7 +317,7 @@ class ThreadedMemoryReactorClock(MemoryReactorClock):
def __init__(self):
self.threadpool = ThreadPool(self)
- self._tcp_callbacks = {}
+ self._tcp_callbacks: Dict[Tuple[str, int], Callable] = {}
self._udp = []
self.lookups: Dict[str, str] = {}
self._thread_callbacks: Deque[Callable[[], None]] = deque()
@@ -355,7 +355,7 @@ class ThreadedMemoryReactorClock(MemoryReactorClock):
def getThreadPool(self):
return self.threadpool
- def add_tcp_client_callback(self, host, port, callback):
+ def add_tcp_client_callback(self, host: str, port: int, callback: Callable):
"""Add a callback that will be invoked when we receive a connection
attempt to the given IP/port using `connectTCP`.
@@ -364,7 +364,7 @@ class ThreadedMemoryReactorClock(MemoryReactorClock):
"""
self._tcp_callbacks[(host, port)] = callback
- def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
+ def connectTCP(self, host: str, port: int, factory, timeout=30, bindAddress=None):
"""Fake L{IReactorTCP.connectTCP}."""
conn = super().connectTCP(
@@ -475,7 +475,7 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs):
return server
-def get_clock():
+def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]:
clock = ThreadedMemoryReactorClock()
hs_clock = Clock(clock)
return clock, hs_clock
--
cgit 1.5.1
From 145cb6d08e2f775da208293a507c1dcd2d4128ce Mon Sep 17 00:00:00 2001
From: Lukas Lihotzki
Date: Thu, 30 Sep 2021 14:04:55 +0200
Subject: Fix getTurnServer response: return an integer ttl (#10922)
`ttl` must be an integer according to the OpenAPI spec:
https://github.com/matrix-org/matrix-doc/blob/old_master/data/api/client-server/voip.yaml#L70
True division (`/`) returns a float instead (`"ttl": 7200.0`).
Floor division (`//`) returns an integer, so the response is spec compliant.
Signed-off-by: Lukas Lihotzki
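For illustration (plain Python semantics, not code from this patch):

    >>> 7200000 / 1000    # true division: a float, not spec compliant
    7200.0
    >>> 7200000 // 1000   # floor division: an integer, as the spec requires
    7200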
---
changelog.d/10922.bugfix | 1 +
synapse/rest/client/voip.py | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/10922.bugfix
diff --git a/changelog.d/10922.bugfix b/changelog.d/10922.bugfix
new file mode 100644
index 0000000000..b7315514e0
--- /dev/null
+++ b/changelog.d/10922.bugfix
@@ -0,0 +1 @@
+Fix a minor bug in the response to `/_matrix/client/r0/voip/turnServer`. Contributed by @lukaslihotzki.
diff --git a/synapse/rest/client/voip.py b/synapse/rest/client/voip.py
index ea2b8aa45f..ea7e025156 100644
--- a/synapse/rest/client/voip.py
+++ b/synapse/rest/client/voip.py
@@ -70,7 +70,7 @@ class VoipRestServlet(RestServlet):
{
"username": username,
"password": password,
- "ttl": userLifetime / 1000,
+ "ttl": userLifetime // 1000,
"uris": turnUris,
},
)
--
cgit 1.5.1
From 7d84d2523a02ce90badb6bdee5ffc182170a57fe Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 30 Sep 2021 11:03:29 -0400
Subject: Fix errors in Synapse logs from unit tests. (#10939)
Fix some harmless errors from background processes (mostly
due to awaiting Mock objects) that occurred in the Synapse
logs during unit tests.
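The underlying issue and fix, as a sketch (simple_async_mock is the existing helper in tests/test_utils used throughout the diff; ApplicationServiceState comes from synapse.appservice):

    from unittest.mock import Mock

    from synapse.appservice import ApplicationServiceState
    from tests.test_utils import simple_async_mock

    store = Mock()

    # A bare Mock is not awaitable: background code that does
    #     await store.get_appservice_state(service)
    # logs "TypeError: object Mock can't be used in 'await' expression".
    store.get_appservice_state = Mock()

    # simple_async_mock wraps the value so the mocked method can be awaited
    # like the real coroutine, returning ApplicationServiceState.UP.
    store.get_appservice_state = simple_async_mock(ApplicationServiceState.UP)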
---
changelog.d/10939.misc | 1 +
tests/appservice/test_scheduler.py | 40 ++++++++++++++----------------
tests/events/test_presence_router.py | 7 +++++-
tests/federation/test_federation_sender.py | 6 ++---
tests/module_api/test_api.py | 7 +++++-
5 files changed, 35 insertions(+), 26 deletions(-)
create mode 100644 changelog.d/10939.misc
diff --git a/changelog.d/10939.misc b/changelog.d/10939.misc
new file mode 100644
index 0000000000..a7cecf8a5b
--- /dev/null
+++ b/changelog.d/10939.misc
@@ -0,0 +1 @@
+Fix logged errors in unit tests.
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
index a2b5ed2030..55f0899bae 100644
--- a/tests/appservice/test_scheduler.py
+++ b/tests/appservice/test_scheduler.py
@@ -24,7 +24,7 @@ from synapse.appservice.scheduler import (
from synapse.logging.context import make_deferred_yieldable
from tests import unittest
-from tests.test_utils import make_awaitable
+from tests.test_utils import simple_async_mock
from ..utils import MockClock
@@ -49,11 +49,10 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
txn = Mock(id=txn_id, service=service, events=events)
# mock methods
- self.store.get_appservice_state = Mock(
- return_value=defer.succeed(ApplicationServiceState.UP)
- )
- txn.send = Mock(return_value=make_awaitable(True))
- self.store.create_appservice_txn = Mock(return_value=defer.succeed(txn))
+ self.store.get_appservice_state = simple_async_mock(ApplicationServiceState.UP)
+ txn.send = simple_async_mock(True)
+ txn.complete = simple_async_mock(True)
+ self.store.create_appservice_txn = simple_async_mock(txn)
# actual call
self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events)))
@@ -71,10 +70,10 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
events = [Mock(), Mock()]
txn = Mock(id="idhere", service=service, events=events)
- self.store.get_appservice_state = Mock(
- return_value=defer.succeed(ApplicationServiceState.DOWN)
+ self.store.get_appservice_state = simple_async_mock(
+ ApplicationServiceState.DOWN
)
- self.store.create_appservice_txn = Mock(return_value=defer.succeed(txn))
+ self.store.create_appservice_txn = simple_async_mock(txn)
# actual call
self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events)))
@@ -94,12 +93,10 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
txn = Mock(id=txn_id, service=service, events=events)
# mock methods
- self.store.get_appservice_state = Mock(
- return_value=defer.succeed(ApplicationServiceState.UP)
- )
- self.store.set_appservice_state = Mock(return_value=defer.succeed(True))
- txn.send = Mock(return_value=make_awaitable(False)) # fails to send
- self.store.create_appservice_txn = Mock(return_value=defer.succeed(txn))
+ self.store.get_appservice_state = simple_async_mock(ApplicationServiceState.UP)
+ self.store.set_appservice_state = simple_async_mock(True)
+ txn.send = simple_async_mock(False) # fails to send
+ self.store.create_appservice_txn = simple_async_mock(txn)
# actual call
self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events)))
@@ -122,7 +119,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
self.as_api = Mock()
self.store = Mock()
self.service = Mock()
- self.callback = Mock()
+ self.callback = simple_async_mock()
self.recoverer = _Recoverer(
clock=self.clock,
as_api=self.as_api,
@@ -144,8 +141,8 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
self.recoverer.recover()
# shouldn't have called anything prior to waiting for exp backoff
self.assertEquals(0, self.store.get_oldest_unsent_txn.call_count)
- txn.send = Mock(return_value=make_awaitable(True))
- txn.complete.return_value = make_awaitable(None)
+ txn.send = simple_async_mock(True)
+ txn.complete = simple_async_mock(None)
# wait for exp backoff
self.clock.advance_time(2)
self.assertEquals(1, txn.send.call_count)
@@ -170,8 +167,8 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
self.recoverer.recover()
self.assertEquals(0, self.store.get_oldest_unsent_txn.call_count)
- txn.send = Mock(return_value=make_awaitable(False))
- txn.complete.return_value = make_awaitable(None)
+ txn.send = simple_async_mock(False)
+ txn.complete = simple_async_mock(None)
self.clock.advance_time(2)
self.assertEquals(1, txn.send.call_count)
self.assertEquals(0, txn.complete.call_count)
@@ -184,7 +181,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
self.assertEquals(3, txn.send.call_count)
self.assertEquals(0, txn.complete.call_count)
self.assertEquals(0, self.callback.call_count)
- txn.send = Mock(return_value=make_awaitable(True)) # successfully send the txn
+ txn.send = simple_async_mock(True) # successfully send the txn
pop_txn = True # returns the txn the first time, then no more.
self.clock.advance_time(16)
self.assertEquals(1, txn.send.call_count) # new mock reset call count
@@ -195,6 +192,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase):
def setUp(self):
self.txn_ctrl = Mock()
+ self.txn_ctrl.send = simple_async_mock()
self.queuer = _ServiceQueuer(self.txn_ctrl, MockClock())
def test_send_single_event_no_queue(self):
diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py
index 3b3866bff8..3deb14c308 100644
--- a/tests/events/test_presence_router.py
+++ b/tests/events/test_presence_router.py
@@ -26,6 +26,7 @@ from synapse.rest.client import login, presence, room
from synapse.types import JsonDict, StreamToken, create_requester
from tests.handlers.test_sync import generate_sync_config
+from tests.test_utils import simple_async_mock
from tests.unittest import FederatingHomeserverTestCase, TestCase, override_config
@@ -133,8 +134,12 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase):
]
def make_homeserver(self, reactor, clock):
+ # Mock out the calls over federation.
+ fed_transport_client = Mock(spec=["send_transaction"])
+ fed_transport_client.send_transaction = simple_async_mock({})
+
hs = self.setup_test_homeserver(
- federation_transport_client=Mock(spec=["send_transaction"]),
+ federation_transport_client=fed_transport_client,
)
# Load the modules into the homeserver
module_api = hs.get_module_api()
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index 65b18fbd7a..b457dad6d2 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -336,7 +336,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
recovery
"""
mock_send_txn = self.hs.get_federation_transport_client().send_transaction
- mock_send_txn.side_effect = lambda t, cb: defer.fail("fail")
+ mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail"))
# create devices
u1 = self.register_user("user", "pass")
@@ -376,7 +376,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
This case tests the behaviour when the server has never been reachable.
"""
mock_send_txn = self.hs.get_federation_transport_client().send_transaction
- mock_send_txn.side_effect = lambda t, cb: defer.fail("fail")
+ mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail"))
# create devices
u1 = self.register_user("user", "pass")
@@ -429,7 +429,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
# now the server goes offline
mock_send_txn = self.hs.get_federation_transport_client().send_transaction
- mock_send_txn.side_effect = lambda t, cb: defer.fail("fail")
+ mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail"))
self.login("user", "pass", device_id="D2")
self.login("user", "pass", device_id="D3")
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index 9d38974fba..e915dd5c7c 100644
--- a/tests/module_api/test_api.py
+++ b/tests/module_api/test_api.py
@@ -25,6 +25,7 @@ from synapse.types import create_requester
from tests.events.test_presence_router import send_presence_update, sync_presence
from tests.replication._base import BaseMultiWorkerStreamTestCase
+from tests.test_utils import simple_async_mock
from tests.test_utils.event_injection import inject_member_event
from tests.unittest import HomeserverTestCase, override_config
from tests.utils import USE_POSTGRES_FOR_TESTS
@@ -46,8 +47,12 @@ class ModuleApiTestCase(HomeserverTestCase):
self.auth_handler = homeserver.get_auth_handler()
def make_homeserver(self, reactor, clock):
+ # Mock out the calls over federation.
+ fed_transport_client = Mock(spec=["send_transaction"])
+ fed_transport_client.send_transaction = simple_async_mock({})
+
return self.setup_test_homeserver(
- federation_transport_client=Mock(spec=["send_transaction"]),
+ federation_transport_client=fed_transport_client,
)
def test_can_register_user(self):
--
cgit 1.5.1
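
Editor's note: the switch above from `Mock(return_value=make_awaitable(...))` to
`simple_async_mock(...)` keeps each mock awaitable while still recording
`call_count`. A minimal sketch of what such a helper might look like (the real
one lives in `tests/test_utils`; the exact signature here is an assumption):

    from unittest.mock import Mock

    def simple_async_mock(return_value=None, raises=None) -> Mock:
        # Calling the returned Mock produces a coroutine, so tests can
        # `await mock(...)` and get `return_value` back (or have `raises`
        # thrown), while call_count and call args are tracked as usual.
        async def cb(*args, **kwargs):
            if raises:
                raise raises
            return return_value

        return Mock(side_effect=cb)

This is why `txn.send = simple_async_mock(True)` can replace both the
`make_awaitable` and `defer.succeed` variants in one stroke.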
From d1bf5f7c9d669fcf60aadc2c6527447adef2c43c Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 30 Sep 2021 11:13:59 -0400
Subject: Strip "join_authorised_via_users_server" from join events which do
not need it. (#10933)
This fixes a "Event not signed by authorising server" error when
transition room member from join -> join, e.g. when updating a
display name or avatar URL for restricted rooms.
---
changelog.d/10933.bugfix | 1 +
synapse/api/constants.py | 3 +++
synapse/event_auth.py | 12 +++++++-----
synapse/events/utils.py | 2 +-
synapse/federation/federation_base.py | 6 +++---
synapse/federation/federation_client.py | 6 +++---
synapse/federation/federation_server.py | 6 +++---
synapse/handlers/federation.py | 9 +++++++--
synapse/handlers/room_member.py | 10 +++++++++-
tests/events/test_utils.py | 7 ++++---
tests/test_event_auth.py | 9 +++++----
11 files changed, 46 insertions(+), 25 deletions(-)
create mode 100644 changelog.d/10933.bugfix
diff --git a/changelog.d/10933.bugfix b/changelog.d/10933.bugfix
new file mode 100644
index 0000000000..e0694fea22
--- /dev/null
+++ b/changelog.d/10933.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse v1.40.0 where changing a user's display name or avatar in a restricted room would cause an authentication error.
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 39fd9954d5..a31f037748 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -217,6 +217,9 @@ class EventContentFields:
# For "marker" events
MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion"
+ # The authorising user for joining a restricted room.
+ AUTHORISING_USER = "join_authorised_via_users_server"
+
class RoomTypes:
"""Understood values of the room_type field of m.room.create events."""
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index eef354de6e..7a1adc2750 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -102,11 +102,11 @@ def validate_event_for_room_version(
room_version_obj.msc3083_join_rules
and event.type == EventTypes.Member
and event.membership == Membership.JOIN
- and "join_authorised_via_users_server" in event.content
+ and EventContentFields.AUTHORISING_USER in event.content
)
if is_invite_via_allow_rule:
authoriser_domain = get_domain_from_id(
- event.content["join_authorised_via_users_server"]
+ event.content[EventContentFields.AUTHORISING_USER]
)
if not event.signatures.get(authoriser_domain):
raise AuthError(403, "Event not signed by authorising server")
@@ -413,7 +413,9 @@ def _is_membership_change_allowed(
# Note that if the caller is in the room or invited, then they do
# not need to meet the allow rules.
if not caller_in_room and not caller_invited:
- authorising_user = event.content.get("join_authorised_via_users_server")
+ authorising_user = event.content.get(
+ EventContentFields.AUTHORISING_USER
+ )
if authorising_user is None:
raise AuthError(403, "Join event is missing authorising user.")
@@ -868,10 +870,10 @@ def auth_types_for_event(
auth_types.add(key)
if room_version.msc3083_join_rules and membership == Membership.JOIN:
- if "join_authorised_via_users_server" in event.content:
+ if EventContentFields.AUTHORISING_USER in event.content:
key = (
EventTypes.Member,
- event.content["join_authorised_via_users_server"],
+ event.content[EventContentFields.AUTHORISING_USER],
)
auth_types.add(key)
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index a13fb0148f..520edbbf61 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -105,7 +105,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
if event_type == EventTypes.Member:
add_fields("membership")
if room_version.msc3375_redaction_rules:
- add_fields("join_authorised_via_users_server")
+ add_fields(EventContentFields.AUTHORISING_USER)
elif event_type == EventTypes.Create:
# MSC2176 rules state that create events cannot be redacted.
if room_version.msc2176_redaction_rules:
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 024e440ff4..0cd424e12a 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -15,7 +15,7 @@
import logging
from collections import namedtuple
-from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
+from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import EventFormatVersions, RoomVersion
from synapse.crypto.event_signing import check_event_content_hash
@@ -184,10 +184,10 @@ async def _check_sigs_on_pdu(
room_version.msc3083_join_rules
and pdu.type == EventTypes.Member
and pdu.membership == Membership.JOIN
- and "join_authorised_via_users_server" in pdu.content
+ and EventContentFields.AUTHORISING_USER in pdu.content
):
authorising_server = get_domain_from_id(
- pdu.content["join_authorised_via_users_server"]
+ pdu.content[EventContentFields.AUTHORISING_USER]
)
try:
await keyring.verify_event_for_server(
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 584836c04a..2ab4dec88f 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -37,7 +37,7 @@ from typing import (
import attr
from prometheus_client import Counter
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.errors import (
CodeMessageException,
Codes,
@@ -875,9 +875,9 @@ class FederationClient(FederationBase):
# If the join is being authorised via allow rules, we need to send
# the /send_join back to the same server that was originally used
# with /make_join.
- if "join_authorised_via_users_server" in pdu.content:
+ if EventContentFields.AUTHORISING_USER in pdu.content:
destinations = [
- get_domain_from_id(pdu.content["join_authorised_via_users_server"])
+ get_domain_from_id(pdu.content[EventContentFields.AUTHORISING_USER])
]
return await self._try_destination_list(
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 83f11d6b88..d8c0b86f23 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -34,7 +34,7 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
-from synapse.api.constants import EduTypes, EventTypes, Membership
+from synapse.api.constants import EduTypes, EventContentFields, EventTypes, Membership
from synapse.api.errors import (
AuthError,
Codes,
@@ -765,11 +765,11 @@ class FederationServer(FederationBase):
if (
room_version.msc3083_join_rules
and event.membership == Membership.JOIN
- and "join_authorised_via_users_server" in event.content
+ and EventContentFields.AUTHORISING_USER in event.content
):
# We can only authorise our own users.
authorising_server = get_domain_from_id(
- event.content["join_authorised_via_users_server"]
+ event.content[EventContentFields.AUTHORISING_USER]
)
if authorising_server != self.server_name:
raise SynapseError(
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 0a10a5c28a..043ca4a224 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -27,7 +27,12 @@ from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse import event_auth
-from synapse.api.constants import EventTypes, Membership, RejectedReason
+from synapse.api.constants import (
+ EventContentFields,
+ EventTypes,
+ Membership,
+ RejectedReason,
+)
from synapse.api.errors import (
AuthError,
CodeMessageException,
@@ -716,7 +721,7 @@ class FederationHandler(BaseHandler):
if include_auth_user_id:
event_content[
- "join_authorised_via_users_server"
+ EventContentFields.AUTHORISING_USER
] = await self._event_auth_handler.get_user_which_could_invite(
room_id,
state_ids,
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 02103f6c9a..29b3e41cc9 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -573,6 +573,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
errcode=Codes.BAD_JSON,
)
+ # The event content should *not* include the authorising user as
+ # it won't be properly signed. Strip it out since it might come
+ # back from a client updating a display name / avatar.
+ #
+ # This only applies to restricted rooms, but there should be no reason
+ # for a client to include it. Unconditionally remove it.
+ content.pop(EventContentFields.AUTHORISING_USER, None)
+
effective_membership_state = action
if action in ["kick", "unban"]:
effective_membership_state = "leave"
@@ -939,7 +947,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# be included in the event content in order to efficiently validate
# the event.
content[
- "join_authorised_via_users_server"
+ EventContentFields.AUTHORISING_USER
] = await self.event_auth_handler.get_user_which_could_invite(
room_id,
current_state_ids,
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index 5446fda5e7..1dea09e480 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from synapse.api.constants import EventContentFields
from synapse.api.room_versions import RoomVersions
from synapse.events import make_event_from_dict
from synapse.events.utils import (
@@ -352,7 +353,7 @@ class PruneEventTestCase(unittest.TestCase):
"event_id": "$test:domain",
"content": {
"membership": "join",
- "join_authorised_via_users_server": "@user:domain",
+ EventContentFields.AUTHORISING_USER: "@user:domain",
"other_key": "stripped",
},
},
@@ -372,7 +373,7 @@ class PruneEventTestCase(unittest.TestCase):
"type": "m.room.member",
"content": {
"membership": "join",
- "join_authorised_via_users_server": "@user:domain",
+ EventContentFields.AUTHORISING_USER: "@user:domain",
"other_key": "stripped",
},
},
@@ -380,7 +381,7 @@ class PruneEventTestCase(unittest.TestCase):
"type": "m.room.member",
"content": {
"membership": "join",
- "join_authorised_via_users_server": "@user:domain",
+ EventContentFields.AUTHORISING_USER: "@user:domain",
},
"signatures": {},
"unsigned": {},
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index e7a7d00883..cf407c51cf 100644
--- a/tests/test_event_auth.py
+++ b/tests/test_event_auth.py
@@ -16,6 +16,7 @@ import unittest
from typing import Optional
from synapse import event_auth
+from synapse.api.constants import EventContentFields
from synapse.api.errors import AuthError
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, make_event_from_dict
@@ -353,7 +354,7 @@ class EventAuthTestCase(unittest.TestCase):
authorised_join_event = _join_event(
pleb,
additional_content={
- "join_authorised_via_users_server": "@creator:example.com"
+ EventContentFields.AUTHORISING_USER: "@creator:example.com"
},
)
event_auth.check_auth_rules_for_event(
@@ -376,7 +377,7 @@ class EventAuthTestCase(unittest.TestCase):
_join_event(
pleb,
additional_content={
- "join_authorised_via_users_server": "@inviter:foo.test"
+ EventContentFields.AUTHORISING_USER: "@inviter:foo.test"
},
),
pl_auth_events,
@@ -401,7 +402,7 @@ class EventAuthTestCase(unittest.TestCase):
_join_event(
pleb,
additional_content={
- "join_authorised_via_users_server": "@other:example.com"
+ EventContentFields.AUTHORISING_USER: "@other:example.com"
},
),
auth_events,
@@ -417,7 +418,7 @@ class EventAuthTestCase(unittest.TestCase):
"join",
sender=creator,
additional_content={
- "join_authorised_via_users_server": "@inviter:foo.test"
+ EventContentFields.AUTHORISING_USER: "@inviter:foo.test"
},
),
auth_events,
--
cgit 1.5.1
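
Editor's note: the behavioural core of this patch is that profile updates
arrive as join -> join membership events, and any
`join_authorised_via_users_server` a client echoes back would no longer carry
a valid server signature. A hedged sketch of the sanitisation step (the
constant is from the diff; the standalone helper shape is an assumption, as
the real code does this inline in `RoomMemberHandler`):

    from synapse.api.constants import EventContentFields

    def sanitise_member_content(content: dict) -> dict:
        # Only the homeserver may add the authorising user, since the event
        # must then be signed by that user's server; drop anything a client
        # sent back, whether or not the room is restricted.
        content.pop(EventContentFields.AUTHORISING_USER, None)
        return content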
From 9e5a429c8b082d4cfbc0bd04c1ddde8822fd96b4 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 30 Sep 2021 14:06:02 -0400
Subject: Clean-up registration tests (#10945)
Uses `override_config` and fixes `test_auto_create_auto_join_where_no_consent`
to properly configure auto-join rooms.
---
changelog.d/10945.misc | 1 +
synapse/handlers/register.py | 4 +-
tests/handlers/test_register.py | 89 ++++++++++++++++++++++++-----------------
3 files changed, 56 insertions(+), 38 deletions(-)
create mode 100644 changelog.d/10945.misc
diff --git a/changelog.d/10945.misc b/changelog.d/10945.misc
new file mode 100644
index 0000000000..7cf1f02ad6
--- /dev/null
+++ b/changelog.d/10945.misc
@@ -0,0 +1 @@
+Fix a broken test to ensure that consent configuration works during registration.
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 4a7ccb882e..cb4eb0720b 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -340,6 +340,8 @@ class RegistrationHandler(BaseHandler):
auth_provider=(auth_provider_id or ""),
).inc()
+ # If the user does not need to consent at registration, auto-join any
+ # configured rooms.
if not self.hs.config.consent.user_consent_at_registration:
if not self.hs.config.auto_join_rooms_for_guests and make_guest:
logger.info(
@@ -387,7 +389,7 @@ class RegistrationHandler(BaseHandler):
"preset": self.hs.config.registration.autocreate_auto_join_room_preset,
}
- # If the configuration providers a user ID to create rooms with, use
+ # If the configuration provides a user ID to create rooms with, use
# that instead of the first user registered.
requires_join = False
if self.hs.config.registration.auto_join_user_id:
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index bd05a2c2d1..db691c4c1c 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -16,7 +16,12 @@ from unittest.mock import Mock
from synapse.api.auth import Auth
from synapse.api.constants import UserTypes
-from synapse.api.errors import Codes, ResourceLimitError, SynapseError
+from synapse.api.errors import (
+ CodeMessageException,
+ Codes,
+ ResourceLimitError,
+ SynapseError,
+)
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import RoomAlias, RoomID, UserID, create_requester
@@ -120,14 +125,24 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
hs_config = self.default_config()
# some of the tests rely on us having a user consent version
- hs_config["user_consent"] = {
- "version": "test_consent_version",
- "template_dir": ".",
- }
+ hs_config.setdefault("user_consent", {}).update(
+ {
+ "version": "test_consent_version",
+ "template_dir": ".",
+ }
+ )
hs_config["max_mau_value"] = 50
hs_config["limit_usage_by_mau"] = True
- hs = self.setup_test_homeserver(config=hs_config)
+ # Don't attempt to reach out over federation.
+ self.mock_federation_client = Mock()
+ self.mock_federation_client.make_query.side_effect = CodeMessageException(
+ 500, ""
+ )
+
+ hs = self.setup_test_homeserver(
+ config=hs_config, federation_client=self.mock_federation_client
+ )
load_legacy_spam_checkers(hs)
@@ -138,9 +153,6 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
return hs
def prepare(self, reactor, clock, hs):
- self.mock_distributor = Mock()
- self.mock_distributor.declare("registered_user")
- self.mock_captcha_client = Mock()
self.handler = self.hs.get_registration_handler()
self.store = self.hs.get_datastore()
self.lots_of_users = 100
@@ -174,21 +186,21 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
self.assertEquals(result_user_id, user_id)
self.assertTrue(result_token is not None)
+ @override_config({"limit_usage_by_mau": False})
def test_mau_limits_when_disabled(self):
- self.hs.config.server.limit_usage_by_mau = False
# Ensure does not throw exception
self.get_success(self.get_or_create_user(self.requester, "a", "display_name"))
+ @override_config({"limit_usage_by_mau": True})
def test_get_or_create_user_mau_not_blocked(self):
- self.hs.config.server.limit_usage_by_mau = True
self.store.count_monthly_users = Mock(
return_value=make_awaitable(self.hs.config.server.max_mau_value - 1)
)
# Ensure does not throw exception
self.get_success(self.get_or_create_user(self.requester, "c", "User"))
+ @override_config({"limit_usage_by_mau": True})
def test_get_or_create_user_mau_blocked(self):
- self.hs.config.server.limit_usage_by_mau = True
self.store.get_monthly_active_count = Mock(
return_value=make_awaitable(self.lots_of_users)
)
@@ -205,8 +217,8 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
ResourceLimitError,
)
+ @override_config({"limit_usage_by_mau": True})
def test_register_mau_blocked(self):
- self.hs.config.server.limit_usage_by_mau = True
self.store.get_monthly_active_count = Mock(
return_value=make_awaitable(self.lots_of_users)
)
@@ -221,10 +233,10 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
self.handler.register_user(localpart="local_part"), ResourceLimitError
)
+ @override_config(
+ {"auto_join_rooms": ["#room:test"], "auto_join_rooms_for_guests": False}
+ )
def test_auto_join_rooms_for_guests(self):
- room_alias_str = "#room:test"
- self.hs.config.auto_join_rooms = [room_alias_str]
- self.hs.config.auto_join_rooms_for_guests = False
user_id = self.get_success(
self.handler.register_user(localpart="jeff", make_guest=True),
)
@@ -243,34 +255,33 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
self.assertTrue(room_id["room_id"] in rooms)
self.assertEqual(len(rooms), 1)
+ @override_config({"auto_join_rooms": []})
def test_auto_create_auto_join_rooms_with_no_rooms(self):
- self.hs.config.auto_join_rooms = []
frank = UserID.from_string("@frank:test")
user_id = self.get_success(self.handler.register_user(frank.localpart))
self.assertEqual(user_id, frank.to_string())
rooms = self.get_success(self.store.get_rooms_for_user(user_id))
self.assertEqual(len(rooms), 0)
+ @override_config({"auto_join_rooms": ["#room:another"]})
def test_auto_create_auto_join_where_room_is_another_domain(self):
- self.hs.config.auto_join_rooms = ["#room:another"]
frank = UserID.from_string("@frank:test")
user_id = self.get_success(self.handler.register_user(frank.localpart))
self.assertEqual(user_id, frank.to_string())
rooms = self.get_success(self.store.get_rooms_for_user(user_id))
self.assertEqual(len(rooms), 0)
+ @override_config(
+ {"auto_join_rooms": ["#room:test"], "autocreate_auto_join_rooms": False}
+ )
def test_auto_create_auto_join_where_auto_create_is_false(self):
- self.hs.config.autocreate_auto_join_rooms = False
- room_alias_str = "#room:test"
- self.hs.config.auto_join_rooms = [room_alias_str]
user_id = self.get_success(self.handler.register_user(localpart="jeff"))
rooms = self.get_success(self.store.get_rooms_for_user(user_id))
self.assertEqual(len(rooms), 0)
+ @override_config({"auto_join_rooms": ["#room:test"]})
def test_auto_create_auto_join_rooms_when_user_is_not_a_real_user(self):
room_alias_str = "#room:test"
- self.hs.config.auto_join_rooms = [room_alias_str]
-
self.store.is_real_user = Mock(return_value=make_awaitable(False))
user_id = self.get_success(self.handler.register_user(localpart="support"))
rooms = self.get_success(self.store.get_rooms_for_user(user_id))
@@ -294,10 +305,8 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
self.assertTrue(room_id["room_id"] in rooms)
self.assertEqual(len(rooms), 1)
+ @override_config({"auto_join_rooms": ["#room:test"]})
def test_auto_create_auto_join_rooms_when_user_is_not_the_first_real_user(self):
- room_alias_str = "#room:test"
- self.hs.config.auto_join_rooms = [room_alias_str]
-
self.store.count_real_users = Mock(return_value=make_awaitable(2))
self.store.is_real_user = Mock(return_value=make_awaitable(True))
user_id = self.get_success(self.handler.register_user(localpart="real"))
@@ -510,6 +519,17 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
self.assertEqual(rooms, set())
self.assertEqual(invited_rooms, [])
+ @override_config(
+ {
+ "user_consent": {
+ "block_events_error": "Error",
+ "require_at_registration": True,
+ },
+ "form_secret": "53cr3t",
+ "public_baseurl": "http://test",
+ "auto_join_rooms": ["#room:test"],
+ },
+ )
def test_auto_create_auto_join_where_no_consent(self):
"""Test to ensure that the first user is not auto-joined to a room if
they have not given general consent.
@@ -521,25 +541,20 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
# * The server is configured to auto-join to a room
# (and autocreate if necessary)
- event_creation_handler = self.hs.get_event_creation_handler()
- # (Messing with the internals of event_creation_handler is fragile
- # but can't see a better way to do this. One option could be to subclass
- # the test with custom config.)
- event_creation_handler._block_events_without_consent_error = "Error"
- event_creation_handler._consent_uri_builder = Mock()
- room_alias_str = "#room:test"
- self.hs.config.auto_join_rooms = [room_alias_str]
-
# When:-
- # * the user is registered and post consent actions are called
+ # * the user is registered
user_id = self.get_success(self.handler.register_user(localpart="jeff"))
- self.get_success(self.handler.post_consent_actions(user_id))
# Then:-
# * Ensure that they have not been joined to the room
rooms = self.get_success(self.store.get_rooms_for_user(user_id))
self.assertEqual(len(rooms), 0)
+ # The user provides consent; ensure they are now in the rooms.
+ self.get_success(self.handler.post_consent_actions(user_id))
+ rooms = self.get_success(self.store.get_rooms_for_user(user_id))
+ self.assertEqual(len(rooms), 1)
+
def test_register_support_user(self):
user_id = self.get_success(
self.handler.register_user(localpart="user", user_type=UserTypes.SUPPORT)
--
cgit 1.5.1
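
Editor's note: `override_config` merges the given dict into the test
homeserver's configuration before setup runs, replacing the fragile pattern of
mutating `self.hs.config` attributes after the fact. A small illustrative test
under that assumption (the config keys are taken from the diff; the test body
is invented for the example):

    from tests.unittest import HomeserverTestCase, override_config

    class MauConfigTestCase(HomeserverTestCase):
        @override_config({"limit_usage_by_mau": True, "max_mau_value": 1})
        def test_mau_config_is_applied(self):
            # The decorator applies these keys before the homeserver is
            # built, so no manual attribute poking is needed afterwards.
            self.assertTrue(self.hs.config.server.limit_usage_by_mau)
            self.assertEqual(self.hs.config.server.max_mau_value, 1)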
From 7e440520c9b370ce008c6a65c5dd87a360a6457c Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 1 Oct 2021 07:02:32 -0400
Subject: Add type hints to filtering classes. (#10958)
---
changelog.d/10958.misc | 1 +
synapse/api/filtering.py | 117 ++++++++++++++++++----------
synapse/storage/databases/main/filtering.py | 8 +-
3 files changed, 81 insertions(+), 45 deletions(-)
create mode 100644 changelog.d/10958.misc
diff --git a/changelog.d/10958.misc b/changelog.d/10958.misc
new file mode 100644
index 0000000000..409ecc35cb
--- /dev/null
+++ b/changelog.d/10958.misc
@@ -0,0 +1 @@
+Add type hints to filtering classes.
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index ad1ff6a9df..20e91a115d 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -15,7 +15,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
-from typing import List
+from typing import (
+ TYPE_CHECKING,
+ Awaitable,
+ Container,
+ Iterable,
+ List,
+ Optional,
+ Set,
+ TypeVar,
+ Union,
+)
import jsonschema
from jsonschema import FormatChecker
@@ -23,7 +33,11 @@ from jsonschema import FormatChecker
from synapse.api.constants import EventContentFields
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
-from synapse.types import RoomID, UserID
+from synapse.events import EventBase
+from synapse.types import JsonDict, RoomID, UserID
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
FILTER_SCHEMA = {
"additionalProperties": False,
@@ -120,25 +134,29 @@ USER_FILTER_SCHEMA = {
@FormatChecker.cls_checks("matrix_room_id")
-def matrix_room_id_validator(room_id_str):
+def matrix_room_id_validator(room_id_str: str) -> RoomID:
return RoomID.from_string(room_id_str)
@FormatChecker.cls_checks("matrix_user_id")
-def matrix_user_id_validator(user_id_str):
+def matrix_user_id_validator(user_id_str: str) -> UserID:
return UserID.from_string(user_id_str)
class Filtering:
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.store = hs.get_datastore()
- async def get_user_filter(self, user_localpart, filter_id):
+ async def get_user_filter(
+ self, user_localpart: str, filter_id: Union[int, str]
+ ) -> "FilterCollection":
result = await self.store.get_user_filter(user_localpart, filter_id)
return FilterCollection(result)
- def add_user_filter(self, user_localpart, user_filter):
+ def add_user_filter(
+ self, user_localpart: str, user_filter: JsonDict
+ ) -> Awaitable[int]:
self.check_valid_filter(user_filter)
return self.store.add_user_filter(user_localpart, user_filter)
@@ -146,13 +164,13 @@ class Filtering:
# replace_user_filter at some point? There's no REST API specified for
# them however
- def check_valid_filter(self, user_filter_json):
+ def check_valid_filter(self, user_filter_json: JsonDict) -> None:
"""Check if the provided filter is valid.
This inspects all definitions contained within the filter.
Args:
- user_filter_json(dict): The filter
+ user_filter_json: The filter
Raises:
SynapseError: If the filter is not valid.
"""
@@ -167,8 +185,12 @@ class Filtering:
raise SynapseError(400, str(e))
+# Filters work across events, presence EDUs, and account data.
+FilterEvent = TypeVar("FilterEvent", EventBase, UserPresenceState, JsonDict)
+
+
class FilterCollection:
- def __init__(self, filter_json):
+ def __init__(self, filter_json: JsonDict):
self._filter_json = filter_json
room_filter_json = self._filter_json.get("room", {})
@@ -188,25 +210,25 @@ class FilterCollection:
self.event_fields = filter_json.get("event_fields", [])
self.event_format = filter_json.get("event_format", "client")
- def __repr__(self):
+ def __repr__(self) -> str:
return "" % (json.dumps(self._filter_json),)
- def get_filter_json(self):
+ def get_filter_json(self) -> JsonDict:
return self._filter_json
- def timeline_limit(self):
+ def timeline_limit(self) -> int:
return self._room_timeline_filter.limit()
- def presence_limit(self):
+ def presence_limit(self) -> int:
return self._presence_filter.limit()
- def ephemeral_limit(self):
+ def ephemeral_limit(self) -> int:
return self._room_ephemeral_filter.limit()
- def lazy_load_members(self):
+ def lazy_load_members(self) -> bool:
return self._room_state_filter.lazy_load_members()
- def include_redundant_members(self):
+ def include_redundant_members(self) -> bool:
return self._room_state_filter.include_redundant_members()
def filter_presence(self, events):
@@ -218,29 +240,31 @@ class FilterCollection:
def filter_room_state(self, events):
return self._room_state_filter.filter(self._room_filter.filter(events))
- def filter_room_timeline(self, events):
+ def filter_room_timeline(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
return self._room_timeline_filter.filter(self._room_filter.filter(events))
- def filter_room_ephemeral(self, events):
+ def filter_room_ephemeral(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
return self._room_ephemeral_filter.filter(self._room_filter.filter(events))
- def filter_room_account_data(self, events):
+ def filter_room_account_data(
+ self, events: Iterable[FilterEvent]
+ ) -> List[FilterEvent]:
return self._room_account_data.filter(self._room_filter.filter(events))
- def blocks_all_presence(self):
+ def blocks_all_presence(self) -> bool:
return (
self._presence_filter.filters_all_types()
or self._presence_filter.filters_all_senders()
)
- def blocks_all_room_ephemeral(self):
+ def blocks_all_room_ephemeral(self) -> bool:
return (
self._room_ephemeral_filter.filters_all_types()
or self._room_ephemeral_filter.filters_all_senders()
or self._room_ephemeral_filter.filters_all_rooms()
)
- def blocks_all_room_timeline(self):
+ def blocks_all_room_timeline(self) -> bool:
return (
self._room_timeline_filter.filters_all_types()
or self._room_timeline_filter.filters_all_senders()
@@ -249,7 +273,7 @@ class FilterCollection:
class Filter:
- def __init__(self, filter_json):
+ def __init__(self, filter_json: JsonDict):
self.filter_json = filter_json
self.types = self.filter_json.get("types", None)
@@ -266,20 +290,20 @@ class Filter:
self.labels = self.filter_json.get("org.matrix.labels", None)
self.not_labels = self.filter_json.get("org.matrix.not_labels", [])
- def filters_all_types(self):
+ def filters_all_types(self) -> bool:
return "*" in self.not_types
- def filters_all_senders(self):
+ def filters_all_senders(self) -> bool:
return "*" in self.not_senders
- def filters_all_rooms(self):
+ def filters_all_rooms(self) -> bool:
return "*" in self.not_rooms
- def check(self, event):
+ def check(self, event: FilterEvent) -> bool:
"""Checks whether the filter matches the given event.
Returns:
- bool: True if the event matches
+ True if the event matches
"""
# We usually get the full "events" as dictionaries coming through,
# except for presence which actually gets passed around as its own
@@ -305,18 +329,25 @@ class Filter:
room_id = event.get("room_id", None)
ev_type = event.get("type", None)
- content = event.get("content", {})
+ content = event.get("content") or {}
# check if there is a string url field in the content for filtering purposes
contains_url = isinstance(content.get("url"), str)
labels = content.get(EventContentFields.LABELS, [])
return self.check_fields(room_id, sender, ev_type, labels, contains_url)
- def check_fields(self, room_id, sender, event_type, labels, contains_url):
+ def check_fields(
+ self,
+ room_id: Optional[str],
+ sender: Optional[str],
+ event_type: Optional[str],
+ labels: Container[str],
+ contains_url: bool,
+ ) -> bool:
"""Checks whether the filter matches the given event fields.
Returns:
- bool: True if the event fields match
+ True if the event fields match
"""
literal_keys = {
"rooms": lambda v: room_id == v,
@@ -343,14 +374,14 @@ class Filter:
return True
- def filter_rooms(self, room_ids):
+ def filter_rooms(self, room_ids: Iterable[str]) -> Set[str]:
"""Apply the 'rooms' filter to a given list of rooms.
Args:
- room_ids (list): A list of room_ids.
+ room_ids: A list of room_ids.
Returns:
- list: A list of room_ids that match the filter
+ A list of room_ids that match the filter
"""
room_ids = set(room_ids)
@@ -363,23 +394,23 @@ class Filter:
return room_ids
- def filter(self, events):
+ def filter(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
return list(filter(self.check, events))
- def limit(self):
+ def limit(self) -> int:
return self.filter_json.get("limit", 10)
- def lazy_load_members(self):
+ def lazy_load_members(self) -> bool:
return self.filter_json.get("lazy_load_members", False)
- def include_redundant_members(self):
+ def include_redundant_members(self) -> bool:
return self.filter_json.get("include_redundant_members", False)
- def with_room_ids(self, room_ids):
+ def with_room_ids(self, room_ids: Iterable[str]) -> "Filter":
"""Returns a new filter with the given room IDs appended.
Args:
- room_ids (iterable[unicode]): The room_ids to add
+ room_ids: The room_ids to add
Returns:
filter: A new filter including the given rooms and the old
@@ -390,8 +421,8 @@ class Filter:
return newFilter
-def _matches_wildcard(actual_value, filter_value):
- if filter_value.endswith("*"):
+def _matches_wildcard(actual_value: Optional[str], filter_value: str) -> bool:
+ if filter_value.endswith("*") and isinstance(actual_value, str):
type_prefix = filter_value[:-1]
return actual_value.startswith(type_prefix)
else:
diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py
index bb244a03c0..434986fa64 100644
--- a/synapse/storage/databases/main/filtering.py
+++ b/synapse/storage/databases/main/filtering.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Union
+
from canonicaljson import encode_canonical_json
from synapse.api.errors import Codes, SynapseError
@@ -22,7 +24,9 @@ from synapse.util.caches.descriptors import cached
class FilteringStore(SQLBaseStore):
@cached(num_args=2)
- async def get_user_filter(self, user_localpart, filter_id):
+ async def get_user_filter(
+ self, user_localpart: str, filter_id: Union[int, str]
+ ) -> JsonDict:
# filter_id is BIGINT UNSIGNED, so if it isn't a number, fail
# with a coherent error message rather than 500 M_UNKNOWN.
try:
@@ -40,7 +44,7 @@ class FilteringStore(SQLBaseStore):
return db_to_json(def_json)
- async def add_user_filter(self, user_localpart: str, user_filter: JsonDict) -> str:
+ async def add_user_filter(self, user_localpart: str, user_filter: JsonDict) -> int:
def_json = encode_canonical_json(user_filter)
# Need an atomic transaction to SELECT the maximal ID so far then
--
cgit 1.5.1
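
Editor's note: `FilterEvent` above is a value-constrained TypeVar rather than
a bound one, so a call with `List[EventBase]` is inferred to return
`List[EventBase]`, not a union of the three constraint types. The same pattern
under simplified types, as a self-contained sketch:

    from typing import Iterable, List, TypeVar

    # Constrained TypeVar: T resolves to exactly one of these types per call
    # site, so filter-style helpers hand back the concrete type they received.
    T = TypeVar("T", int, str)

    def keep_truthy(items: Iterable[T]) -> List[T]:
        return [item for item in items if item]

    ints: List[int] = keep_truthy([0, 1, 2])   # inferred as List[int]
    strs: List[str] = keep_truthy(["", "a"])   # inferred as List[str]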
From e46ac85d674d90fa01aa49aee9587093ab6d8677 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Fri, 1 Oct 2021 12:22:47 +0100
Subject: type-hint `HomeserverTestcase.setup_test_homeserver` (#10961)
* type-hint `HomeserverTestcase.setup_test_homeserver` for better IDE
completion; a small drive-by change.
---
changelog.d/10961.misc | 1 +
tests/replication/_base.py | 19 +++++++++++++++----
tests/rest/client/test_login.py | 6 +++---
tests/unittest.py | 4 ++--
4 files changed, 21 insertions(+), 9 deletions(-)
create mode 100644 changelog.d/10961.misc
diff --git a/changelog.d/10961.misc b/changelog.d/10961.misc
new file mode 100644
index 0000000000..0e35813488
--- /dev/null
+++ b/changelog.d/10961.misc
@@ -0,0 +1 @@
+Add type-hint to `HomeserverTestcase.setup_test_homeserver`.
\ No newline at end of file
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index cdd6e3d3c1..eac4664b41 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -70,8 +70,16 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
# databases objects are the same.
self.worker_hs.get_datastore().db_pool = hs.get_datastore().db_pool
+ # Normally we'd pass in the handler to `setup_test_homeserver`, which would
+ # eventually hit "Install @cache_in_self attributes" in tests/utils.py.
+ # Unfortunately our handler wants a reference to the homeserver. That leaves
+ # us with a chicken-and-egg problem.
+ # We can work around this: create the homeserver first, create the handler
+ # and bodge it in after the fact. The bodging requires us to know the
+ # dirty details of how `cache_in_self` works. We politely ask mypy to
+ # ignore our dirty dealings.
self.test_handler = self._build_replication_data_handler()
- self.worker_hs._replication_data_handler = self.test_handler
+ self.worker_hs._replication_data_handler = self.test_handler # type: ignore[attr-defined]
repl_handler = ReplicationCommandHandler(self.worker_hs)
self.client = ClientReplicationStreamProtocol(
@@ -315,12 +323,15 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
)
)
+ # Copy the port into a new, non-Optional variable so mypy knows we're
+ # not going to reset `instance_loc` to `None` under its feet. See
+ # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
+ port = instance_loc.port
+
self.reactor.add_tcp_client_callback(
self.reactor.lookups[instance_loc.host],
instance_loc.port,
- lambda: self._handle_http_replication_attempt(
- worker_hs, instance_loc.port
- ),
+ lambda: self._handle_http_replication_attempt(worker_hs, port),
)
store = worker_hs.get_datastore()
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index 371615a015..7fd92c94e0 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -94,9 +94,9 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor, clock):
self.hs = self.setup_test_homeserver()
- self.hs.config.enable_registration = True
- self.hs.config.registrations_require_3pid = []
- self.hs.config.auto_join_rooms = []
+ self.hs.config.registration.enable_registration = True
+ self.hs.config.registration.registrations_require_3pid = []
+ self.hs.config.registration.auto_join_rooms = []
self.hs.config.captcha.enable_registration_captcha = False
return self.hs
diff --git a/tests/unittest.py b/tests/unittest.py
index 5f93ebf147..0807467e39 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -20,7 +20,7 @@ import inspect
import logging
import secrets
import time
-from typing import Callable, Dict, Iterable, Optional, Tuple, Type, TypeVar, Union
+from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Type, TypeVar, Union
from unittest.mock import Mock, patch
from canonicaljson import json
@@ -449,7 +449,7 @@ class HomeserverTestCase(TestCase):
client_ip,
)
- def setup_test_homeserver(self, *args, **kwargs):
+ def setup_test_homeserver(self, *args: Any, **kwargs: Any) -> HomeServer:
"""
Set up the test homeserver, meant to be called by the overridable
make_homeserver. It automatically passes through the test class's
--
cgit 1.5.1
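
Editor's note: the `port = instance_loc.port` copy above is the standard
workaround for mypy declining to carry an `Optional` narrowing into a closure:
the lambda captures the variable, not its current value, so mypy must assume
it could be rebound to `None` before the callback runs. A standalone sketch of
the pattern (names are invented for illustration):

    from typing import Callable, Optional

    def connect_later(
        port: Optional[int],
        schedule: Callable[[Callable[[], None]], None],
    ) -> None:
        if port is None:
            return
        # Copying into a fresh, non-Optional local lets mypy accept the
        # closure, since that local can never be narrowed back to None.
        concrete_port: int = port
        schedule(lambda: print(f"dialling port {concrete_port}"))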
From 32072dcdac0072049832cda6204cd75be2d4e38f Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 30 Sep 2021 11:13:59 -0400
Subject: Strip "join_authorised_via_users_server" from join events which do
not need it. (#10933)
This fixes a "Event not signed by authorising server" error when
transition room member from join -> join, e.g. when updating a
display name or avatar URL for restricted rooms.
---
changelog.d/10933.bugfix | 1 +
synapse/api/constants.py | 3 +++
synapse/event_auth.py | 12 +++++++-----
synapse/events/utils.py | 2 +-
synapse/federation/federation_base.py | 6 +++---
synapse/federation/federation_client.py | 6 +++---
synapse/federation/federation_server.py | 6 +++---
synapse/handlers/federation.py | 9 +++++++--
synapse/handlers/room_member.py | 10 +++++++++-
tests/events/test_utils.py | 7 ++++---
tests/test_event_auth.py | 9 +++++----
11 files changed, 46 insertions(+), 25 deletions(-)
create mode 100644 changelog.d/10933.bugfix
diff --git a/changelog.d/10933.bugfix b/changelog.d/10933.bugfix
new file mode 100644
index 0000000000..e0694fea22
--- /dev/null
+++ b/changelog.d/10933.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse v1.40.0 where changing a user's display name or avatar in a restricted room would cause an authentication error.
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 39fd9954d5..a31f037748 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -217,6 +217,9 @@ class EventContentFields:
# For "marker" events
MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion"
+ # The authorising user for joining a restricted room.
+ AUTHORISING_USER = "join_authorised_via_users_server"
+
class RoomTypes:
"""Understood values of the room_type field of m.room.create events."""
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index fc50a0e71a..650402836c 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -115,11 +115,11 @@ def check(
is_invite_via_allow_rule = (
event.type == EventTypes.Member
and event.membership == Membership.JOIN
- and "join_authorised_via_users_server" in event.content
+ and EventContentFields.AUTHORISING_USER in event.content
)
if is_invite_via_allow_rule:
authoriser_domain = get_domain_from_id(
- event.content["join_authorised_via_users_server"]
+ event.content[EventContentFields.AUTHORISING_USER]
)
if not event.signatures.get(authoriser_domain):
raise AuthError(403, "Event not signed by authorising server")
@@ -381,7 +381,9 @@ def _is_membership_change_allowed(
# Note that if the caller is in the room or invited, then they do
# not need to meet the allow rules.
if not caller_in_room and not caller_invited:
- authorising_user = event.content.get("join_authorised_via_users_server")
+ authorising_user = event.content.get(
+ EventContentFields.AUTHORISING_USER
+ )
if authorising_user is None:
raise AuthError(403, "Join event is missing authorising user.")
@@ -836,10 +838,10 @@ def auth_types_for_event(
auth_types.add(key)
if room_version.msc3083_join_rules and membership == Membership.JOIN:
- if "join_authorised_via_users_server" in event.content:
+ if EventContentFields.AUTHORISING_USER in event.content:
key = (
EventTypes.Member,
- event.content["join_authorised_via_users_server"],
+ event.content[EventContentFields.AUTHORISING_USER],
)
auth_types.add(key)
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index f86113a448..38fccd1efc 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -105,7 +105,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
if event_type == EventTypes.Member:
add_fields("membership")
if room_version.msc3375_redaction_rules:
- add_fields("join_authorised_via_users_server")
+ add_fields(EventContentFields.AUTHORISING_USER)
elif event_type == EventTypes.Create:
# MSC2176 rules state that create events cannot be redacted.
if room_version.msc2176_redaction_rules:
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 024e440ff4..0cd424e12a 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -15,7 +15,7 @@
import logging
from collections import namedtuple
-from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
+from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import EventFormatVersions, RoomVersion
from synapse.crypto.event_signing import check_event_content_hash
@@ -184,10 +184,10 @@ async def _check_sigs_on_pdu(
room_version.msc3083_join_rules
and pdu.type == EventTypes.Member
and pdu.membership == Membership.JOIN
- and "join_authorised_via_users_server" in pdu.content
+ and EventContentFields.AUTHORISING_USER in pdu.content
):
authorising_server = get_domain_from_id(
- pdu.content["join_authorised_via_users_server"]
+ pdu.content[EventContentFields.AUTHORISING_USER]
)
try:
await keyring.verify_event_for_server(
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 584836c04a..2ab4dec88f 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -37,7 +37,7 @@ from typing import (
import attr
from prometheus_client import Counter
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.errors import (
CodeMessageException,
Codes,
@@ -875,9 +875,9 @@ class FederationClient(FederationBase):
# If the join is being authorised via allow rules, we need to send
# the /send_join back to the same server that was originally used
# with /make_join.
- if "join_authorised_via_users_server" in pdu.content:
+ if EventContentFields.AUTHORISING_USER in pdu.content:
destinations = [
- get_domain_from_id(pdu.content["join_authorised_via_users_server"])
+ get_domain_from_id(pdu.content[EventContentFields.AUTHORISING_USER])
]
return await self._try_destination_list(
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 638959cbec..5f4383eebc 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -34,7 +34,7 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
-from synapse.api.constants import EduTypes, EventTypes, Membership
+from synapse.api.constants import EduTypes, EventContentFields, EventTypes, Membership
from synapse.api.errors import (
AuthError,
Codes,
@@ -765,11 +765,11 @@ class FederationServer(FederationBase):
if (
room_version.msc3083_join_rules
and event.membership == Membership.JOIN
- and "join_authorised_via_users_server" in event.content
+ and EventContentFields.AUTHORISING_USER in event.content
):
# We can only authorise our own users.
authorising_server = get_domain_from_id(
- event.content["join_authorised_via_users_server"]
+ event.content[EventContentFields.AUTHORISING_USER]
)
if authorising_server != self.server_name:
raise SynapseError(
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index b17ef2a9a1..adbd150e46 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -27,7 +27,12 @@ from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse import event_auth
-from synapse.api.constants import EventTypes, Membership, RejectedReason
+from synapse.api.constants import (
+ EventContentFields,
+ EventTypes,
+ Membership,
+ RejectedReason,
+)
from synapse.api.errors import (
AuthError,
CodeMessageException,
@@ -712,7 +717,7 @@ class FederationHandler(BaseHandler):
if include_auth_user_id:
event_content[
- "join_authorised_via_users_server"
+ EventContentFields.AUTHORISING_USER
] = await self._event_auth_handler.get_user_which_could_invite(
room_id,
state_ids,
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 1a56c82fbd..afa7e4727d 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -573,6 +573,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
errcode=Codes.BAD_JSON,
)
+ # The event content should *not* include the authorising user as
+ # it won't be properly signed. Strip it out since it might come
+ # back from a client updating a display name / avatar.
+ #
+ # This only applies to restricted rooms, but there should be no reason
+ # for a client to include it. Unconditionally remove it.
+ content.pop(EventContentFields.AUTHORISING_USER, None)
+
effective_membership_state = action
if action in ["kick", "unban"]:
effective_membership_state = "leave"
@@ -939,7 +947,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# be included in the event content in order to efficiently validate
# the event.
content[
- "join_authorised_via_users_server"
+ EventContentFields.AUTHORISING_USER
] = await self.event_auth_handler.get_user_which_could_invite(
room_id,
current_state_ids,
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index 5446fda5e7..1dea09e480 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from synapse.api.constants import EventContentFields
from synapse.api.room_versions import RoomVersions
from synapse.events import make_event_from_dict
from synapse.events.utils import (
@@ -352,7 +353,7 @@ class PruneEventTestCase(unittest.TestCase):
"event_id": "$test:domain",
"content": {
"membership": "join",
- "join_authorised_via_users_server": "@user:domain",
+ EventContentFields.AUTHORISING_USER: "@user:domain",
"other_key": "stripped",
},
},
@@ -372,7 +373,7 @@ class PruneEventTestCase(unittest.TestCase):
"type": "m.room.member",
"content": {
"membership": "join",
- "join_authorised_via_users_server": "@user:domain",
+ EventContentFields.AUTHORISING_USER: "@user:domain",
"other_key": "stripped",
},
},
@@ -380,7 +381,7 @@ class PruneEventTestCase(unittest.TestCase):
"type": "m.room.member",
"content": {
"membership": "join",
- "join_authorised_via_users_server": "@user:domain",
+ EventContentFields.AUTHORISING_USER: "@user:domain",
},
"signatures": {},
"unsigned": {},
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index 6ebd01bcbe..1a4d078780 100644
--- a/tests/test_event_auth.py
+++ b/tests/test_event_auth.py
@@ -16,6 +16,7 @@ import unittest
from typing import Optional
from synapse import event_auth
+from synapse.api.constants import EventContentFields
from synapse.api.errors import AuthError
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, make_event_from_dict
@@ -380,7 +381,7 @@ class EventAuthTestCase(unittest.TestCase):
authorised_join_event = _join_event(
pleb,
additional_content={
- "join_authorised_via_users_server": "@creator:example.com"
+ EventContentFields.AUTHORISING_USER: "@creator:example.com"
},
)
event_auth.check(
@@ -404,7 +405,7 @@ class EventAuthTestCase(unittest.TestCase):
_join_event(
pleb,
additional_content={
- "join_authorised_via_users_server": "@inviter:foo.test"
+ EventContentFields.AUTHORISING_USER: "@inviter:foo.test"
},
),
pl_auth_events,
@@ -431,7 +432,7 @@ class EventAuthTestCase(unittest.TestCase):
_join_event(
pleb,
additional_content={
- "join_authorised_via_users_server": "@other:example.com"
+ EventContentFields.AUTHORISING_USER: "@other:example.com"
},
),
auth_events,
@@ -448,7 +449,7 @@ class EventAuthTestCase(unittest.TestCase):
"join",
sender=creator,
additional_content={
- "join_authorised_via_users_server": "@inviter:foo.test"
+ EventContentFields.AUTHORISING_USER: "@inviter:foo.test"
},
),
auth_events,
--
cgit 1.5.1
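
Editor's note: this second appearance of #10933 is the backport of the same
change to the release branch, hence the differing hunk contexts. One aspect
worth calling out from its `events/utils.py` hunk: under MSC3375 redaction
rules the authorising user must *survive* redaction, since it forms part of
the event's auth. A reduced sketch of that pruning behaviour (the field name
is from the diff; the helper shape is an assumption, as the real
`prune_event_dict` handles many more event types):

    from synapse.api.constants import EventContentFields

    def prune_member_content(content: dict, msc3375_redaction_rules: bool) -> dict:
        # Redaction keeps only an allow-list of keys: membership always
        # survives, and the authorising user survives on room versions
        # implementing MSC3375.
        allowed = {"membership"}
        if msc3375_redaction_rules:
            allowed.add(EventContentFields.AUTHORISING_USER)
        return {k: v for k, v in content.items() if k in allowed}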
From d1cbad388fc42d483e0e3b107620852f359d2cc8 Mon Sep 17 00:00:00 2001
From: Sean Quah <8349537+squahtx@users.noreply.github.com>
Date: Fri, 1 Oct 2021 17:22:13 +0100
Subject: Fix error in `get_user_ip_and_agents` when fetching from the database
(#10968)
---
changelog.d/10968.bugfix | 1 +
synapse/storage/databases/main/client_ips.py | 4 ++--
tests/storage/test_client_ips.py | 34 ++++++++++++++++++++++++++++
3 files changed, 37 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/10968.bugfix
diff --git a/changelog.d/10968.bugfix b/changelog.d/10968.bugfix
new file mode 100644
index 0000000000..76624ed73c
--- /dev/null
+++ b/changelog.d/10968.bugfix
@@ -0,0 +1 @@
+Fix `/admin/whois/{user_id}` endpoint, which was broken in v1.44.0rc1.
diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index 7e33ae578c..cc192f5c87 100644
--- a/synapse/storage/databases/main/client_ips.py
+++ b/synapse/storage/databases/main/client_ips.py
@@ -591,8 +591,8 @@ class ClientIpStore(ClientIpWorkerStore):
)
results.update(
- ((row["access_token"], row["ip"]), (row["user_agent"], row["last_seen"]))
- for row in rows
+ ((access_token, ip), (user_agent, last_seen))
+ for access_token, ip, user_agent, last_seen in rows
)
return [
{
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index 1c2df54ecc..3cc8038f1e 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -15,9 +15,12 @@
from unittest.mock import Mock
+from parameterized import parameterized
+
import synapse.rest.admin
from synapse.http.site import XForwardedForRequest
from synapse.rest.client import login
+from synapse.types import UserID
from tests import unittest
from tests.server import make_request
@@ -143,6 +146,37 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
],
)
+ @parameterized.expand([(False,), (True,)])
+ def test_get_user_ip_and_agents(self, after_persisting: bool):
+ """Test `get_user_ip_and_agents` for persisted and unpersisted data"""
+ self.reactor.advance(12345678)
+
+ user_id = "@user:id"
+ user = UserID.from_string(user_id)
+
+ # Insert a user IP
+ self.get_success(
+ self.store.insert_client_ip(
+ user_id, "access_token", "ip", "user_agent", "MY_DEVICE"
+ )
+ )
+
+ if after_persisting:
+ # Trigger the storage loop
+ self.reactor.advance(10)
+
+ self.assertEqual(
+ self.get_success(self.store.get_user_ip_and_agents(user)),
+ [
+ {
+ "access_token": "access_token",
+ "ip": "ip",
+ "user_agent": "user_agent",
+ "last_seen": 12345678000,
+ },
+ ],
+ )
+
@override_config({"limit_usage_by_mau": False, "max_mau_value": 50})
def test_disabled_monthly_active_user(self):
user_id = "@user:server"
--
cgit 1.5.1
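
Editor's note: the bug class here is that the low-level query in v1.44.0rc1
returned rows as plain tuples, while the aggregation code still indexed them
like dicts (`row["access_token"]`), which fails at runtime on a tuple. A
reduced sketch of the corrected aggregation, with invented sample data:

    # Rows come back as plain tuples in a fixed column order, so unpack
    # positionally instead of indexing by column name.
    rows = [
        ("access_token", "ip", "user_agent", 12345678000),
    ]
    results = {
        (access_token, ip): (user_agent, last_seen)
        for access_token, ip, user_agent, last_seen in rows
    }
    assert results[("access_token", "ip")] == ("user_agent", 12345678000)

The accompanying `@parameterized.expand` test then exercises both the
in-memory and persisted paths, so a regression in either would be caught.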
From a0f48ee89d88fd7b6da8023dbba607a69073152e Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Mon, 4 Oct 2021 07:18:54 -0400
Subject: Use direct references for configuration variables (part 7). (#10959)
---
changelog.d/10959.misc | 1 +
synapse/handlers/auth.py | 2 +-
synapse/handlers/identity.py | 13 ++++++++++---
synapse/handlers/profile.py | 4 ++--
synapse/handlers/register.py | 9 ++++++---
synapse/handlers/room_member.py | 2 +-
synapse/handlers/ui_auth/checkers.py | 14 ++++++++------
synapse/rest/admin/users.py | 4 ++--
synapse/rest/client/account.py | 22 +++++++++++-----------
synapse/rest/client/auth.py | 6 ++++--
synapse/rest/client/capabilities.py | 6 +++---
synapse/rest/client/login.py | 6 +++---
synapse/rest/client/register.py | 26 +++++++++++++-------------
synapse/rest/well_known.py | 4 ++--
synapse/storage/databases/main/registration.py | 2 +-
synapse/util/threepids.py | 4 ++--
tests/config/test_load.py | 6 +++---
tests/handlers/test_profile.py | 4 ++--
tests/rest/admin/test_user.py | 4 ++--
tests/rest/client/test_account.py | 4 ++--
tests/rest/client/test_identity.py | 2 +-
tests/rest/client/test_register.py | 4 ++--
tests/unittest.py | 2 +-
23 files changed, 83 insertions(+), 68 deletions(-)
create mode 100644 changelog.d/10959.misc
diff --git a/changelog.d/10959.misc b/changelog.d/10959.misc
new file mode 100644
index 0000000000..586a0b3a96
--- /dev/null
+++ b/changelog.d/10959.misc
@@ -0,0 +1 @@
+Use direct references to config flags.
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index a8c717efd5..2d0f3d566c 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -198,7 +198,7 @@ class AuthHandler(BaseHandler):
if inst.is_enabled():
self.checkers[inst.AUTH_TYPE] = inst # type: ignore
- self.bcrypt_rounds = hs.config.bcrypt_rounds
+ self.bcrypt_rounds = hs.config.registration.bcrypt_rounds
# we can't use hs.get_module_api() here, because to do so will create an
# import loop.
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index a0640fcac0..c881475c25 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -573,9 +573,15 @@ class IdentityHandler(BaseHandler):
# Try to validate as email
if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ # Remote emails will only be used if a valid identity server is provided.
+ assert (
+ self.hs.config.registration.account_threepid_delegate_email is not None
+ )
+
# Ask our delegated email identity server
validation_session = await self.threepid_from_creds(
- self.hs.config.account_threepid_delegate_email, threepid_creds
+ self.hs.config.registration.account_threepid_delegate_email,
+ threepid_creds,
)
elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
# Get a validated session matching these details
@@ -587,10 +593,11 @@ class IdentityHandler(BaseHandler):
return validation_session
# Try to validate as msisdn
- if self.hs.config.account_threepid_delegate_msisdn:
+ if self.hs.config.registration.account_threepid_delegate_msisdn:
# Ask our delegated msisdn identity server
validation_session = await self.threepid_from_creds(
- self.hs.config.account_threepid_delegate_msisdn, threepid_creds
+ self.hs.config.registration.account_threepid_delegate_msisdn,
+ threepid_creds,
)
return validation_session
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 425c0d4973..2e19706c69 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -178,7 +178,7 @@ class ProfileHandler(BaseHandler):
if not by_admin and target_user != requester.user:
raise AuthError(400, "Cannot set another user's displayname")
- if not by_admin and not self.hs.config.enable_set_displayname:
+ if not by_admin and not self.hs.config.registration.enable_set_displayname:
profile = await self.store.get_profileinfo(target_user.localpart)
if profile.display_name:
raise SynapseError(
@@ -268,7 +268,7 @@ class ProfileHandler(BaseHandler):
if not by_admin and target_user != requester.user:
raise AuthError(400, "Cannot set another user's avatar_url")
- if not by_admin and not self.hs.config.enable_set_avatar_url:
+ if not by_admin and not self.hs.config.registration.enable_set_avatar_url:
profile = await self.store.get_profileinfo(target_user.localpart)
if profile.avatar_url:
raise SynapseError(
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index cb4eb0720b..441af7a848 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -116,8 +116,8 @@ class RegistrationHandler(BaseHandler):
self._register_device_client = self.register_device_inner
self.pusher_pool = hs.get_pusherpool()
- self.session_lifetime = hs.config.session_lifetime
- self.access_token_lifetime = hs.config.access_token_lifetime
+ self.session_lifetime = hs.config.registration.session_lifetime
+ self.access_token_lifetime = hs.config.registration.access_token_lifetime
init_counters_for_auth_provider("")
@@ -343,7 +343,10 @@ class RegistrationHandler(BaseHandler):
# If the user does not need to consent at registration, auto-join any
# configured rooms.
if not self.hs.config.consent.user_consent_at_registration:
- if not self.hs.config.auto_join_rooms_for_guests and make_guest:
+ if (
+ not self.hs.config.registration.auto_join_rooms_for_guests
+ and make_guest
+ ):
logger.info(
"Skipping auto-join for %s because auto-join for guests is disabled",
user_id,
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 29b3e41cc9..c8fb24a20c 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -89,7 +89,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
self._server_notices_mxid = self.config.servernotices.server_notices_mxid
- self._enable_lookup = hs.config.enable_3pid_lookup
+ self._enable_lookup = hs.config.registration.enable_3pid_lookup
self.allow_per_room_profiles = self.config.server.allow_per_room_profiles
self._join_rate_limiter_local = Ratelimiter(
diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py
index 8f5d465fa1..184730ebe8 100644
--- a/synapse/handlers/ui_auth/checkers.py
+++ b/synapse/handlers/ui_auth/checkers.py
@@ -153,21 +153,23 @@ class _BaseThreepidAuthChecker:
# msisdns are currently always ThreepidBehaviour.REMOTE
if medium == "msisdn":
- if not self.hs.config.account_threepid_delegate_msisdn:
+ if not self.hs.config.registration.account_threepid_delegate_msisdn:
raise SynapseError(
400, "Phone number verification is not enabled on this homeserver"
)
threepid = await identity_handler.threepid_from_creds(
- self.hs.config.account_threepid_delegate_msisdn, threepid_creds
+ self.hs.config.registration.account_threepid_delegate_msisdn,
+ threepid_creds,
)
elif medium == "email":
if (
self.hs.config.email.threepid_behaviour_email
== ThreepidBehaviour.REMOTE
):
- assert self.hs.config.account_threepid_delegate_email
+ assert self.hs.config.registration.account_threepid_delegate_email
threepid = await identity_handler.threepid_from_creds(
- self.hs.config.account_threepid_delegate_email, threepid_creds
+ self.hs.config.registration.account_threepid_delegate_email,
+ threepid_creds,
)
elif (
self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL
@@ -240,7 +242,7 @@ class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker):
_BaseThreepidAuthChecker.__init__(self, hs)
def is_enabled(self) -> bool:
- return bool(self.hs.config.account_threepid_delegate_msisdn)
+ return bool(self.hs.config.registration.account_threepid_delegate_msisdn)
async def check_auth(self, authdict: dict, clientip: str) -> Any:
return await self._check_threepid("msisdn", authdict)
@@ -252,7 +254,7 @@ class RegistrationTokenAuthChecker(UserInteractiveAuthChecker):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.hs = hs
- self._enabled = bool(hs.config.registration_requires_token)
+ self._enabled = bool(hs.config.registration.registration_requires_token)
self.store = hs.get_datastore()
def is_enabled(self) -> bool:
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 46bfec4623..f20aa65301 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -442,7 +442,7 @@ class UserRegisterServlet(RestServlet):
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
self._clear_old_nonces()
- if not self.hs.config.registration_shared_secret:
+ if not self.hs.config.registration.registration_shared_secret:
raise SynapseError(400, "Shared secret registration is not enabled")
body = parse_json_object_from_request(request)
@@ -498,7 +498,7 @@ class UserRegisterServlet(RestServlet):
got_mac = body["mac"]
want_mac_builder = hmac.new(
- key=self.hs.config.registration_shared_secret.encode(),
+ key=self.hs.config.registration.registration_shared_secret.encode(),
digestmod=hashlib.sha1,
)
want_mac_builder.update(nonce.encode("utf8"))
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index fff133ef10..6b272658fc 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -130,11 +130,11 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
- assert self.hs.config.account_threepid_delegate_email
+ assert self.hs.config.registration.account_threepid_delegate_email
# Have the configured identity server handle the request
ret = await self.identity_handler.requestEmailToken(
- self.hs.config.account_threepid_delegate_email,
+ self.hs.config.registration.account_threepid_delegate_email,
email,
client_secret,
send_attempt,
@@ -414,11 +414,11 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
- assert self.hs.config.account_threepid_delegate_email
+ assert self.hs.config.registration.account_threepid_delegate_email
# Have the configured identity server handle the request
ret = await self.identity_handler.requestEmailToken(
- self.hs.config.account_threepid_delegate_email,
+ self.hs.config.registration.account_threepid_delegate_email,
email,
client_secret,
send_attempt,
@@ -496,7 +496,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE)
- if not self.hs.config.account_threepid_delegate_msisdn:
+ if not self.hs.config.registration.account_threepid_delegate_msisdn:
logger.warning(
"No upstream msisdn account_threepid_delegate configured on the server to "
"handle this request"
@@ -507,7 +507,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
)
ret = await self.identity_handler.requestMsisdnToken(
- self.hs.config.account_threepid_delegate_msisdn,
+ self.hs.config.registration.account_threepid_delegate_msisdn,
country,
phone_number,
client_secret,
@@ -604,7 +604,7 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
self.identity_handler = hs.get_identity_handler()
async def on_POST(self, request: Request) -> Tuple[int, JsonDict]:
- if not self.config.account_threepid_delegate_msisdn:
+ if not self.config.registration.account_threepid_delegate_msisdn:
raise SynapseError(
400,
"This homeserver is not validating phone numbers. Use an identity server "
@@ -617,7 +617,7 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
# Proxy submit_token request to msisdn threepid delegate
response = await self.identity_handler.proxy_msisdn_submit_token(
- self.config.account_threepid_delegate_msisdn,
+ self.config.registration.account_threepid_delegate_msisdn,
body["client_secret"],
body["sid"],
body["token"],
@@ -644,7 +644,7 @@ class ThreepidRestServlet(RestServlet):
return 200, {"threepids": threepids}
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- if not self.hs.config.enable_3pid_changes:
+ if not self.hs.config.registration.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
)
@@ -693,7 +693,7 @@ class ThreepidAddRestServlet(RestServlet):
@interactive_auth_handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- if not self.hs.config.enable_3pid_changes:
+ if not self.hs.config.registration.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
)
@@ -801,7 +801,7 @@ class ThreepidDeleteRestServlet(RestServlet):
self.auth_handler = hs.get_auth_handler()
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- if not self.hs.config.enable_3pid_changes:
+ if not self.hs.config.registration.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
)
diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py
index 282861fae2..c9ad35a3ad 100644
--- a/synapse/rest/client/auth.py
+++ b/synapse/rest/client/auth.py
@@ -49,8 +49,10 @@ class AuthRestServlet(RestServlet):
self.registration_handler = hs.get_registration_handler()
self.recaptcha_template = hs.config.captcha.recaptcha_template
self.terms_template = hs.config.terms_template
- self.registration_token_template = hs.config.registration_token_template
- self.success_template = hs.config.fallback_success_template
+ self.registration_token_template = (
+ hs.config.registration.registration_token_template
+ )
+ self.success_template = hs.config.registration.fallback_success_template
async def on_GET(self, request: SynapseRequest, stagetype: str) -> None:
session = parse_string(request, "session")
diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py
index d6b6256413..2a3e24ae7e 100644
--- a/synapse/rest/client/capabilities.py
+++ b/synapse/rest/client/capabilities.py
@@ -64,13 +64,13 @@ class CapabilitiesRestServlet(RestServlet):
if self.config.experimental.msc3283_enabled:
response["capabilities"]["org.matrix.msc3283.set_displayname"] = {
- "enabled": self.config.enable_set_displayname
+ "enabled": self.config.registration.enable_set_displayname
}
response["capabilities"]["org.matrix.msc3283.set_avatar_url"] = {
- "enabled": self.config.enable_set_avatar_url
+ "enabled": self.config.registration.enable_set_avatar_url
}
response["capabilities"]["org.matrix.msc3283.3pid_changes"] = {
- "enabled": self.config.enable_3pid_changes
+ "enabled": self.config.registration.enable_3pid_changes
}
return 200, response
diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py
index fa5c173f4b..d49a647b03 100644
--- a/synapse/rest/client/login.py
+++ b/synapse/rest/client/login.py
@@ -79,7 +79,7 @@ class LoginRestServlet(RestServlet):
self.saml2_enabled = hs.config.saml2.saml2_enabled
self.cas_enabled = hs.config.cas.cas_enabled
self.oidc_enabled = hs.config.oidc.oidc_enabled
- self._msc2918_enabled = hs.config.access_token_lifetime is not None
+ self._msc2918_enabled = hs.config.registration.access_token_lifetime is not None
self.auth = hs.get_auth()
@@ -447,7 +447,7 @@ class RefreshTokenServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
self._auth_handler = hs.get_auth_handler()
self._clock = hs.get_clock()
- self.access_token_lifetime = hs.config.access_token_lifetime
+ self.access_token_lifetime = hs.config.registration.access_token_lifetime
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
refresh_submission = parse_json_object_from_request(request)
@@ -556,7 +556,7 @@ class CasTicketServlet(RestServlet):
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
LoginRestServlet(hs).register(http_server)
- if hs.config.access_token_lifetime is not None:
+ if hs.config.registration.access_token_lifetime is not None:
RefreshTokenServlet(hs).register(http_server)
SsoRedirectServlet(hs).register(http_server)
if hs.config.cas.cas_enabled:
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index a6eb6f6410..bf3cb34146 100644
--- a/synapse/rest/client/register.py
+++ b/synapse/rest/client/register.py
@@ -140,11 +140,11 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
- assert self.hs.config.account_threepid_delegate_email
+ assert self.hs.config.registration.account_threepid_delegate_email
# Have the configured identity server handle the request
ret = await self.identity_handler.requestEmailToken(
- self.hs.config.account_threepid_delegate_email,
+ self.hs.config.registration.account_threepid_delegate_email,
email,
client_secret,
send_attempt,
@@ -221,7 +221,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
400, "Phone number is already in use", Codes.THREEPID_IN_USE
)
- if not self.hs.config.account_threepid_delegate_msisdn:
+ if not self.hs.config.registration.account_threepid_delegate_msisdn:
logger.warning(
"No upstream msisdn account_threepid_delegate configured on the server to "
"handle this request"
@@ -231,7 +231,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
)
ret = await self.identity_handler.requestMsisdnToken(
- self.hs.config.account_threepid_delegate_msisdn,
+ self.hs.config.registration.account_threepid_delegate_msisdn,
country,
phone_number,
client_secret,
@@ -341,7 +341,7 @@ class UsernameAvailabilityRestServlet(RestServlet):
)
async def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
- if not self.hs.config.enable_registration:
+ if not self.hs.config.registration.enable_registration:
raise SynapseError(
403, "Registration has been disabled", errcode=Codes.FORBIDDEN
)
@@ -391,7 +391,7 @@ class RegistrationTokenValidityRestServlet(RestServlet):
async def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
await self.ratelimiter.ratelimit(None, (request.getClientIP(),))
- if not self.hs.config.enable_registration:
+ if not self.hs.config.registration.enable_registration:
raise SynapseError(
403, "Registration has been disabled", errcode=Codes.FORBIDDEN
)
@@ -419,8 +419,8 @@ class RegisterRestServlet(RestServlet):
self.ratelimiter = hs.get_registration_ratelimiter()
self.password_policy_handler = hs.get_password_policy_handler()
self.clock = hs.get_clock()
- self._registration_enabled = self.hs.config.enable_registration
- self._msc2918_enabled = hs.config.access_token_lifetime is not None
+ self._registration_enabled = self.hs.config.registration.enable_registration
+ self._msc2918_enabled = hs.config.registration.access_token_lifetime is not None
self._registration_flows = _calculate_registration_flows(
hs.config, self.auth_handler
@@ -800,7 +800,7 @@ class RegisterRestServlet(RestServlet):
async def _do_guest_registration(
self, params: JsonDict, address: Optional[str] = None
) -> Tuple[int, JsonDict]:
- if not self.hs.config.allow_guest_access:
+ if not self.hs.config.registration.allow_guest_access:
raise SynapseError(403, "Guest access is disabled")
user_id = await self.registration_handler.register_user(
make_guest=True, address=address
@@ -849,13 +849,13 @@ def _calculate_registration_flows(
"""
# FIXME: need a better error than "no auth flow found" for scenarios
# where we required 3PID for registration but the user didn't give one
- require_email = "email" in config.registrations_require_3pid
- require_msisdn = "msisdn" in config.registrations_require_3pid
+ require_email = "email" in config.registration.registrations_require_3pid
+ require_msisdn = "msisdn" in config.registration.registrations_require_3pid
show_msisdn = True
show_email = True
- if config.disable_msisdn_registration:
+ if config.registration.disable_msisdn_registration:
show_msisdn = False
require_msisdn = False
@@ -909,7 +909,7 @@ def _calculate_registration_flows(
flow.insert(0, LoginType.RECAPTCHA)
# Prepend registration token to all flows if we're requiring a token
- if config.registration_requires_token:
+ if config.registration.registration_requires_token:
for flow in flows:
flow.insert(0, LoginType.REGISTRATION_TOKEN)
diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py
index c80a3a99aa..7ac01faab4 100644
--- a/synapse/rest/well_known.py
+++ b/synapse/rest/well_known.py
@@ -39,9 +39,9 @@ class WellKnownBuilder:
result = {"m.homeserver": {"base_url": self._config.server.public_baseurl}}
- if self._config.default_identity_server:
+ if self._config.registration.default_identity_server:
result["m.identity_server"] = {
- "base_url": self._config.default_identity_server
+ "base_url": self._config.registration.default_identity_server
}
return result
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 7279b0924e..de262fbf5a 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -1710,7 +1710,7 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
We do this by grandfathering in existing user threepids assuming that
they used one of the server configured trusted identity servers.
"""
- id_servers = set(self.config.trusted_third_party_id_servers)
+ id_servers = set(self.config.registration.trusted_third_party_id_servers)
def _bg_user_threepids_grandfather_txn(txn):
sql = """
diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py
index baa9190a9a..389adf00f6 100644
--- a/synapse/util/threepids.py
+++ b/synapse/util/threepids.py
@@ -44,8 +44,8 @@ def check_3pid_allowed(hs: "HomeServer", medium: str, address: str) -> bool:
bool: whether the 3PID medium/address is allowed to be added to this HS
"""
- if hs.config.allowed_local_3pids:
- for constraint in hs.config.allowed_local_3pids:
+ if hs.config.registration.allowed_local_3pids:
+ for constraint in hs.config.registration.allowed_local_3pids:
logger.debug(
"Checking 3PID %s (%s) against %s (%s)",
address,
diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index ef6c2beec7..8e49ca26d9 100644
--- a/tests/config/test_load.py
+++ b/tests/config/test_load.py
@@ -84,16 +84,16 @@ class ConfigLoadingTestCase(unittest.TestCase):
)
# Check that disable_registration clobbers enable_registration.
config = HomeServerConfig.load_config("", ["-c", self.file])
- self.assertFalse(config.enable_registration)
+ self.assertFalse(config.registration.enable_registration)
config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
- self.assertFalse(config.enable_registration)
+ self.assertFalse(config.registration.enable_registration)
# Check that either config value is clobbered by the command line.
config = HomeServerConfig.load_or_generate_config(
"", ["-c", self.file, "--enable-registration"]
)
- self.assertTrue(config.enable_registration)
+ self.assertTrue(config.registration.enable_registration)
def test_stats_enabled(self):
self.generate_config_and_remove_lines_containing("enable_metrics")
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index 57cc3e2646..c153018fd8 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -110,7 +110,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
)
def test_set_my_name_if_disabled(self):
- self.hs.config.enable_set_displayname = False
+ self.hs.config.registration.enable_set_displayname = False
# Setting displayname for the first time is allowed
self.get_success(
@@ -225,7 +225,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
)
def test_set_my_avatar_if_disabled(self):
- self.hs.config.enable_set_avatar_url = False
+ self.hs.config.registration.enable_set_avatar_url = False
# Setting displayname for the first time is allowed
self.get_success(
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index a285d5a7fe..6ed9e42173 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -59,7 +59,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
self.hs = self.setup_test_homeserver()
- self.hs.config.registration_shared_secret = "shared"
+ self.hs.config.registration.registration_shared_secret = "shared"
self.hs.get_media_repository = Mock()
self.hs.get_deactivate_account_handler = Mock()
@@ -71,7 +71,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
If there is no shared secret, registration through this method will be
prevented.
"""
- self.hs.config.registration_shared_secret = None
+ self.hs.config.registration.registration_shared_secret = None
channel = self.make_request("POST", self.url, b"{}")
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index 2f44547bfb..89d85b0a17 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -664,7 +664,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
def test_add_email_if_disabled(self):
"""Test adding email to profile when doing so is disallowed"""
- self.hs.config.enable_3pid_changes = False
+ self.hs.config.registration.enable_3pid_changes = False
client_secret = "foobar"
session_id = self._request_token(self.email, client_secret)
@@ -734,7 +734,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
def test_delete_email_if_disabled(self):
"""Test deleting an email from profile when disallowed"""
- self.hs.config.enable_3pid_changes = False
+ self.hs.config.registration.enable_3pid_changes = False
# Add a threepid
self.get_success(
diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py
index ca2e8ff8ef..becb4e8dcc 100644
--- a/tests/rest/client/test_identity.py
+++ b/tests/rest/client/test_identity.py
@@ -37,7 +37,7 @@ class IdentityTestCase(unittest.HomeserverTestCase):
return self.hs
def test_3pid_lookup_disabled(self):
- self.hs.config.enable_3pid_lookup = False
+ self.hs.config.registration.enable_3pid_lookup = False
self.register_user("kermit", "monkey")
tok = self.login("kermit", "monkey")
diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py
index af135d57e1..66dcfc9f88 100644
--- a/tests/rest/client/test_register.py
+++ b/tests/rest/client/test_register.py
@@ -147,7 +147,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
def test_POST_guest_registration(self):
self.hs.config.key.macaroon_secret_key = "test"
- self.hs.config.allow_guest_access = True
+ self.hs.config.registration.allow_guest_access = True
channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
@@ -156,7 +156,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
self.assertDictContainsSubset(det_data, channel.json_body)
def test_POST_disabled_guest_registration(self):
- self.hs.config.allow_guest_access = False
+ self.hs.config.registration.allow_guest_access = False
channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
diff --git a/tests/unittest.py b/tests/unittest.py
index 0807467e39..1f803564f6 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -560,7 +560,7 @@ class HomeserverTestCase(TestCase):
Returns:
The MXID of the new user.
"""
- self.hs.config.registration_shared_secret = "shared"
+ self.hs.config.registration.registration_shared_secret = "shared"
# Create the user
channel = self.make_request("GET", "/_synapse/admin/v1/register")
--
cgit 1.5.1
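The change in this series is mechanical but worth seeing in isolation: flat attributes on the root config object move under a named sub-config, and each call site names the owning section explicitly. A minimal sketch under assumed class shapes (these are not Synapse's actual config classes):

from typing import Optional

class RegistrationConfig:
    """Illustrative sub-config owning registration-related flags."""
    def __init__(
        self,
        enable_registration: bool = True,
        registration_shared_secret: Optional[str] = None,
    ) -> None:
        self.enable_registration = enable_registration
        self.registration_shared_secret = registration_shared_secret

class HomeServerConfig:
    """Illustrative root config: each section hangs off a named attribute."""
    def __init__(self) -> None:
        self.registration = RegistrationConfig()

config = HomeServerConfig()

# Before: hs.config.enable_registration (the root object proxies every flag).
# After:  hs.config.registration.enable_registration (explicit owner).
assert config.registration.enable_registration
assert config.registration.registration_shared_secret is None

Naming the section at each call site removes the risk of two sections defining the same flag name on the root object, and makes it obvious when reading a handler which part of the config file controls its behaviour.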
From f7b034a24bd5e64f05934453fe7b072894e124db Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Mon, 4 Oct 2021 12:45:51 +0100
Subject: Consistently exclude from user_directory (#10960)
* Introduce `should_include_local_user_in_dir`
We exclude three kinds of local users from the user_directory tables. At
present we don't consistently exclude all three in the same places. This
commit introduces a new function to gather those exclusion conditions
together. Because we have to handle local and remote users in different
ways, I've made that function only consider the case of local users.
It's the caller's responsibility to make the local versus remote
distinction clear and correct.
A test fixup is required. The test now hits a path which makes db
queries against the users table. The expected rows were missing, because
we were using a dummy user that hadn't actually been registered.
We also add new test cases to cover the exclusion logic.
----
By my reading this makes these changes:
* When an app service user registers or changes their profile, they will
_not_ be added to the user directory. (Previously only support and
deactivated users were excluded). This is consistent with the logic that
rebuilds the user directory. See also [the discussion
here](https://github.com/matrix-org/synapse/pull/10914#discussion_r716859548).
* When rebuilding the directory, exclude support and disabled users from
room sharing tables. Previously only appservice users were excluded.
* Exclude all three categories of local users when rebuilding the
directory. Previously `_populate_user_directory_process_users` didn't do
any exclusion.
Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
changelog.d/10960.bugfix | 1 +
synapse/handlers/user_directory.py | 27 +--
synapse/storage/databases/main/user_directory.py | 46 ++++--
tests/handlers/test_user_directory.py | 200 +++++++++++++++++++++--
tests/rest/client/test_login.py | 17 +-
tests/storage/test_user_directory.py | 146 ++++++++++++++++-
tests/unittest.py | 29 ++++
7 files changed, 409 insertions(+), 57 deletions(-)
create mode 100644 changelog.d/10960.bugfix
diff --git a/changelog.d/10960.bugfix b/changelog.d/10960.bugfix
new file mode 100644
index 0000000000..b4f1c228ea
--- /dev/null
+++ b/changelog.d/10960.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where rebuilding the user directory wouldn't exclude support and disabled users.
\ No newline at end of file
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index f4430ce3c9..18d8c8744e 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -132,12 +132,7 @@ class UserDirectoryHandler(StateDeltasHandler):
# FIXME(#3714): We should probably do this in the same worker as all
# the other changes.
- # Support users are for diagnostics and should not appear in the user directory.
- is_support = await self.store.is_support_user(user_id)
- # When change profile information of deactivated user it should not appear in the user directory.
- is_deactivated = await self.store.get_user_deactivated_status(user_id)
-
- if not (is_support or is_deactivated):
+ if await self.store.should_include_local_user_in_dir(user_id):
await self.store.update_profile_in_user_dir(
user_id, profile.display_name, profile.avatar_url
)
@@ -229,8 +224,10 @@ class UserDirectoryHandler(StateDeltasHandler):
else:
logger.debug("Server is still in room: %r", room_id)
- is_support = await self.store.is_support_user(state_key)
- if not is_support:
+ include_in_dir = not self.is_mine_id(
+ state_key
+ ) or await self.store.should_include_local_user_in_dir(state_key)
+ if include_in_dir:
if change is MatchChange.no_change:
# Handle any profile changes
await self._handle_profile_change(
@@ -356,13 +353,7 @@ class UserDirectoryHandler(StateDeltasHandler):
# First, if they're our user then we need to update for every user
if self.is_mine_id(user_id):
-
- is_appservice = self.store.get_if_app_services_interested_in_user(
- user_id
- )
-
- # We don't care about appservice users.
- if not is_appservice:
+ if await self.store.should_include_local_user_in_dir(user_id):
for other_user_id in other_users_in_room:
if user_id == other_user_id:
continue
@@ -374,10 +365,10 @@ class UserDirectoryHandler(StateDeltasHandler):
if user_id == other_user_id:
continue
- is_appservice = self.store.get_if_app_services_interested_in_user(
+ include_other_user = self.is_mine_id(
other_user_id
- )
- if self.is_mine_id(other_user_id) and not is_appservice:
+ ) and await self.store.should_include_local_user_in_dir(other_user_id)
+ if include_other_user:
to_insert.add((other_user_id, user_id))
if to_insert:
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index c26e3e066f..5f538947ec 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -40,12 +40,10 @@ from synapse.util.caches.descriptors import cached
logger = logging.getLogger(__name__)
-
TEMP_TABLE = "_temp_populate_user_directory"
class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
-
# How many records do we calculate before sending it to
# add_users_who_share_private_rooms?
SHARE_PRIVATE_WORKING_SET = 500
@@ -235,6 +233,13 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
)
users_with_profile = await self.get_users_in_room_with_profiles(room_id)
+ # Throw away users excluded from the directory.
+ users_with_profile = {
+ user_id: profile
+ for user_id, profile in users_with_profile.items()
+ if not self.hs.is_mine_id(user_id)
+ or await self.should_include_local_user_in_dir(user_id)
+ }
# Update each user in the user directory.
for user_id, profile in users_with_profile.items():
@@ -246,9 +251,6 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
if is_public:
for user_id in users_with_profile:
- if self.get_if_app_services_interested_in_user(user_id):
- continue
-
to_insert.add(user_id)
if to_insert:
@@ -256,12 +258,12 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
to_insert.clear()
else:
for user_id in users_with_profile:
+ # We want the set of pairs (L, M) where L and M are
+ # in `users_with_profile` and L is local.
+ # Do so by looking for the local user L first.
if not self.hs.is_mine_id(user_id):
continue
- if self.get_if_app_services_interested_in_user(user_id):
- continue
-
for other_user_id in users_with_profile:
if user_id == other_user_id:
continue
@@ -349,10 +351,11 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
)
for user_id in users_to_work_on:
- profile = await self.get_profileinfo(get_localpart_from_id(user_id))
- await self.update_profile_in_user_dir(
- user_id, profile.display_name, profile.avatar_url
- )
+ if await self.should_include_local_user_in_dir(user_id):
+ profile = await self.get_profileinfo(get_localpart_from_id(user_id))
+ await self.update_profile_in_user_dir(
+ user_id, profile.display_name, profile.avatar_url
+ )
# We've finished processing a user. Delete it from the table.
await self.db_pool.simple_delete_one(
@@ -369,6 +372,24 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
return len(users_to_work_on)
+ async def should_include_local_user_in_dir(self, user: str) -> bool:
+ """Certain classes of local user are omitted from the user directory.
+ Is this user one of them?
+ """
+ # App service users aren't usually contactable, so exclude them.
+ if self.get_if_app_services_interested_in_user(user):
+ # TODO we might want to make this configurable for each app service
+ return False
+
+ # Support users are for diagnostics and should not appear in the user directory.
+ if await self.is_support_user(user):
+ return False
+
+ # Deactivated users aren't contactable, so should not appear in the user directory.
+ if await self.get_user_deactivated_status(user):
+ return False
+ return True
+
async def is_room_world_readable_or_publicly_joinable(self, room_id: str) -> bool:
"""Check if the room is either world_readable or publically joinable"""
@@ -537,7 +558,6 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
-
# How many records do we calculate before sending it to
# add_users_who_share_private_rooms?
SHARE_PRIVATE_WORKING_SET = 500
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 2988befb21..b3c3af113b 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Tuple
from unittest.mock import Mock, patch
from urllib.parse import quote
@@ -20,7 +21,8 @@ from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import UserTypes
from synapse.api.room_versions import RoomVersion, RoomVersions
-from synapse.rest.client import login, room, user_directory
+from synapse.appservice import ApplicationService
+from synapse.rest.client import login, register, room, user_directory
from synapse.server import HomeServer
from synapse.storage.roommember import ProfileInfo
from synapse.types import create_requester
@@ -28,6 +30,7 @@ from synapse.util import Clock
from tests import unittest
from tests.storage.test_user_directory import GetUserDirectoryTables
+from tests.test_utils.event_injection import inject_member_event
from tests.unittest import override_config
@@ -47,13 +50,29 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
synapse.rest.admin.register_servlets,
+ register.register_servlets,
room.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
config = self.default_config()
config["update_user_directory"] = True
- return self.setup_test_homeserver(config=config)
+
+ self.appservice = ApplicationService(
+ token="i_am_an_app_service",
+ hostname="test",
+ id="1234",
+ namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]},
+ sender="@as:test",
+ )
+
+ mock_load_appservices = Mock(return_value=[self.appservice])
+ with patch(
+ "synapse.storage.databases.main.appservice.load_appservices",
+ mock_load_appservices,
+ ):
+ hs = self.setup_test_homeserver(config=config)
+ return hs
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastore()
@@ -62,6 +81,137 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.event_creation_handler = self.hs.get_event_creation_handler()
self.user_dir_helper = GetUserDirectoryTables(self.store)
+ def test_normal_user_pair(self) -> None:
+ """Sanity check that the room-sharing tables are updated correctly."""
+ alice = self.register_user("alice", "pass")
+ alice_token = self.login(alice, "pass")
+ bob = self.register_user("bob", "pass")
+ bob_token = self.login(bob, "pass")
+
+ public = self.helper.create_room_as(
+ alice,
+ is_public=True,
+ extra_content={"visibility": "public"},
+ tok=alice_token,
+ )
+ private = self.helper.create_room_as(alice, is_public=False, tok=alice_token)
+ self.helper.invite(private, alice, bob, tok=alice_token)
+ self.helper.join(public, bob, tok=bob_token)
+ self.helper.join(private, bob, tok=bob_token)
+
+ # Alice also makes a second public room but no-one else joins
+ public2 = self.helper.create_room_as(
+ alice,
+ is_public=True,
+ extra_content={"visibility": "public"},
+ tok=alice_token,
+ )
+
+ users = self.get_success(self.user_dir_helper.get_users_in_user_directory())
+ in_public = self.get_success(self.user_dir_helper.get_users_in_public_rooms())
+ in_private = self.get_success(
+ self.user_dir_helper.get_users_who_share_private_rooms()
+ )
+
+ self.assertEqual(users, {alice, bob})
+ self.assertEqual(
+ set(in_public), {(alice, public), (bob, public), (alice, public2)}
+ )
+ self.assertEqual(
+ self.user_dir_helper._compress_shared(in_private),
+ {(alice, bob, private), (bob, alice, private)},
+ )
+
+ # The next three tests (test_excludes_*) all set up
+ # - A normal user included in the user dir
+ # - A public and private room created by that user
+ # - A user excluded from the user dir, belonging to both rooms
+
+ # They match similar logic in storage/test_user_directory.py. But that tests
+ # rebuilding the directory; this tests updating it incrementally.
+
+ def test_excludes_support_user(self) -> None:
+ alice = self.register_user("alice", "pass")
+ alice_token = self.login(alice, "pass")
+ support = "@support1:test"
+ self.get_success(
+ self.store.register_user(
+ user_id=support, password_hash=None, user_type=UserTypes.SUPPORT
+ )
+ )
+
+ public, private = self._create_rooms_and_inject_memberships(
+ alice, alice_token, support
+ )
+ self._check_only_one_user_in_directory(alice, public)
+
+ def test_excludes_deactivated_user(self) -> None:
+ admin = self.register_user("admin", "pass", admin=True)
+ admin_token = self.login(admin, "pass")
+ user = self.register_user("naughty", "pass")
+
+ # Deactivate the user.
+ channel = self.make_request(
+ "PUT",
+ f"/_synapse/admin/v2/users/{user}",
+ access_token=admin_token,
+ content={"deactivated": True},
+ )
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.json_body["deactivated"], True)
+
+ # Join the deactivated user to rooms owned by the admin.
+ # Is this something that could actually happen outside of a test?
+ public, private = self._create_rooms_and_inject_memberships(
+ admin, admin_token, user
+ )
+ self._check_only_one_user_in_directory(admin, public)
+
+ def test_excludes_appservices_user(self) -> None:
+ # Register an AS user.
+ user = self.register_user("user", "pass")
+ token = self.login(user, "pass")
+ as_user = self.register_appservice_user("as_user_potato", self.appservice.token)
+
+ # Join the AS user to rooms owned by the normal user.
+ public, private = self._create_rooms_and_inject_memberships(
+ user, token, as_user
+ )
+ self._check_only_one_user_in_directory(user, public)
+
+ def _create_rooms_and_inject_memberships(
+ self, creator: str, token: str, joiner: str
+ ) -> Tuple[str, str]:
+ """Create a public and private room as a normal user.
+ Then get the `joiner` into those rooms.
+ """
+ # TODO: Duplicates the same-named method in UserDirectoryInitialPopulationTestcase.
+ public_room = self.helper.create_room_as(
+ creator,
+ is_public=True,
+ # See https://github.com/matrix-org/synapse/issues/10951
+ extra_content={"visibility": "public"},
+ tok=token,
+ )
+ private_room = self.helper.create_room_as(creator, is_public=False, tok=token)
+
+ # HACK: get the user into these rooms
+ self.get_success(inject_member_event(self.hs, public_room, joiner, "join"))
+ self.get_success(inject_member_event(self.hs, private_room, joiner, "join"))
+
+ return public_room, private_room
+
+ def _check_only_one_user_in_directory(self, user: str, public: str) -> None:
+ users = self.get_success(self.user_dir_helper.get_users_in_user_directory())
+ in_public = self.get_success(self.user_dir_helper.get_users_in_public_rooms())
+ in_private = self.get_success(
+ self.user_dir_helper.get_users_who_share_private_rooms()
+ )
+
+ self.assertEqual(users, {user})
+ self.assertEqual(set(in_public), {(user, public)})
+ self.assertEqual(in_private, [])
+
def test_handle_local_profile_change_with_support_user(self) -> None:
support_user_id = "@support:test"
self.get_success(
@@ -125,6 +275,26 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
profile = self.get_success(self.store.get_user_in_directory(r_user_id))
self.assertTrue(profile is None)
+ def test_handle_local_profile_change_with_appservice_user(self) -> None:
+ # create user
+ as_user_id = self.register_appservice_user(
+ "as_user_alice", self.appservice.token
+ )
+
+ # profile is not in directory
+ profile = self.get_success(self.store.get_user_in_directory(as_user_id))
+ self.assertTrue(profile is None)
+
+ # update profile
+ profile_info = ProfileInfo(avatar_url="avatar_url", display_name="4L1c3")
+ self.get_success(
+ self.handler.handle_local_profile_change(as_user_id, profile_info)
+ )
+
+ # profile is still not in directory
+ profile = self.get_success(self.store.get_user_in_directory(as_user_id))
+ self.assertTrue(profile is None)
+
def test_handle_user_deactivated_support_user(self) -> None:
s_user_id = "@support:test"
self.get_success(
@@ -483,8 +653,6 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
- user_id = "@test:test"
-
servlets = [
user_directory.register_servlets,
room.register_servlets,
@@ -504,16 +672,21 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
def test_disabling_room_list(self) -> None:
self.config.userdirectory.user_directory_search_enabled = True
- # First we create a room with another user so that user dir is non-empty
- # for our user
- self.helper.create_room_as(self.user_id)
+ # Create two users and put them in the same room.
+ u1 = self.register_user("user1", "pass")
+ u1_token = self.login(u1, "pass")
u2 = self.register_user("user2", "pass")
- room = self.helper.create_room_as(self.user_id)
- self.helper.join(room, user=u2)
+ u2_token = self.login(u2, "pass")
+
+ room = self.helper.create_room_as(u1, tok=u1_token)
+ self.helper.join(room, user=u2, tok=u2_token)
- # Assert user directory is not empty
+ # Each should see the other when searching the user directory.
channel = self.make_request(
- "POST", b"user_directory/search", b'{"search_term":"user2"}'
+ "POST",
+ b"user_directory/search",
+ b'{"search_term":"user2"}',
+ access_token=u1_token,
)
self.assertEquals(200, channel.code, channel.result)
self.assertTrue(len(channel.json_body["results"]) > 0)
@@ -521,7 +694,10 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
# Disable user directory and check search returns nothing
self.config.userdirectory.user_directory_search_enabled = False
channel = self.make_request(
- "POST", b"user_directory/search", b'{"search_term":"user2"}'
+ "POST",
+ b"user_directory/search",
+ b'{"search_term":"user2"}',
+ access_token=u1_token,
)
self.assertEquals(200, channel.code, channel.result)
self.assertTrue(len(channel.json_body["results"]) == 0)
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index 7fd92c94e0..a63f04bd41 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -1064,13 +1064,6 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
register.register_servlets,
]
- def register_as_user(self, username):
- self.make_request(
- b"POST",
- "/_matrix/client/r0/register?access_token=%s" % (self.service.token,),
- {"username": username},
- )
-
def make_homeserver(self, reactor, clock):
self.hs = self.setup_test_homeserver()
@@ -1107,7 +1100,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
def test_login_appservice_user(self):
"""Test that an appservice user can use /login"""
- self.register_as_user(AS_USER)
+ self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
@@ -1121,7 +1114,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
def test_login_appservice_user_bot(self):
"""Test that the appservice bot can use /login"""
- self.register_as_user(AS_USER)
+ self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
@@ -1135,7 +1128,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
def test_login_appservice_wrong_user(self):
"""Test that non-as users cannot login with the as token"""
- self.register_as_user(AS_USER)
+ self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
@@ -1149,7 +1142,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
def test_login_appservice_wrong_as(self):
"""Test that as users cannot login with wrong as token"""
- self.register_as_user(AS_USER)
+ self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
@@ -1165,7 +1158,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
"""Test that users must provide a token when using the appservice
login method
"""
- self.register_as_user(AS_USER)
+ self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index 74c8a8599e..6884ca9b7a 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -12,15 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Set, Tuple
+from unittest.mock import Mock, patch
from twisted.test.proto_helpers import MemoryReactor
+from synapse.api.constants import UserTypes
+from synapse.appservice import ApplicationService
from synapse.rest import admin
-from synapse.rest.client import login, room
+from synapse.rest.client import login, register, room
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.util import Clock
+from tests.test_utils.event_injection import inject_member_event
from tests.unittest import HomeserverTestCase, override_config
ALICE = "@alice:a"
@@ -64,6 +68,14 @@ class GetUserDirectoryTables:
["user_id", "other_user_id", "room_id"],
)
+ async def get_users_in_user_directory(self) -> Set[str]:
+ result = await self.store.db_pool.simple_select_list(
+ "user_directory",
+ None,
+ ["user_id"],
+ )
+ return {row["user_id"] for row in result}
+
class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
"""Ensure that rebuilding the directory writes the correct data to the DB.
@@ -74,10 +86,28 @@ class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
servlets = [
login.register_servlets,
- admin.register_servlets_for_client_rest_resource,
+ admin.register_servlets,
room.register_servlets,
+ register.register_servlets,
]
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ self.appservice = ApplicationService(
+ token="i_am_an_app_service",
+ hostname="test",
+ id="1234",
+ namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]},
+ sender="@as:test",
+ )
+
+ mock_load_appservices = Mock(return_value=[self.appservice])
+ with patch(
+ "synapse.storage.databases.main.appservice.load_appservices",
+ mock_load_appservices,
+ ):
+ hs = super().make_homeserver(reactor, clock)
+ return hs
+
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastore()
self.user_dir_helper = GetUserDirectoryTables(self.store)
@@ -204,6 +234,118 @@ class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
{(u1, u3, private_room), (u3, u1, private_room)},
)
+ # All three should have entries in the directory
+ users = self.get_success(self.user_dir_helper.get_users_in_user_directory())
+ self.assertEqual(users, {u1, u2, u3})
+
+ # The next three tests (test_population_excludes_*) all set up
+ # - A normal user included in the user dir
+ # - A public and private room created by that user
+ # - A user excluded from the user dir, belonging to both rooms
+
+ # They match similar logic in handlers/test_user_directory.py. But that tests
+ # updating the directory; this tests rebuilding it from scratch.
+
+ def _create_rooms_and_inject_memberships(
+ self, creator: str, token: str, joiner: str
+ ) -> Tuple[str, str]:
+ """Create a public and private room as a normal user.
+ Then get the `joiner` into those rooms.
+ """
+ public_room = self.helper.create_room_as(
+ creator,
+ is_public=True,
+ # See https://github.com/matrix-org/synapse/issues/10951
+ extra_content={"visibility": "public"},
+ tok=token,
+ )
+ private_room = self.helper.create_room_as(creator, is_public=False, tok=token)
+
+ # HACK: get the user into these rooms
+ self.get_success(inject_member_event(self.hs, public_room, joiner, "join"))
+ self.get_success(inject_member_event(self.hs, private_room, joiner, "join"))
+
+ return public_room, private_room
+
+ def _check_room_sharing_tables(
+ self, normal_user: str, public_room: str, private_room: str
+ ) -> None:
+ # After rebuilding the directory, we should only see the normal user.
+ users = self.get_success(self.user_dir_helper.get_users_in_user_directory())
+ self.assertEqual(users, {normal_user})
+ in_public_rooms = self.get_success(
+ self.user_dir_helper.get_users_in_public_rooms()
+ )
+ self.assertEqual(set(in_public_rooms), {(normal_user, public_room)})
+ in_private_rooms = self.get_success(
+ self.user_dir_helper.get_users_who_share_private_rooms()
+ )
+ self.assertEqual(in_private_rooms, [])
+
+ def test_population_excludes_support_user(self) -> None:
+ # Create a normal and support user.
+ user = self.register_user("user", "pass")
+ token = self.login(user, "pass")
+ support = "@support1:test"
+ self.get_success(
+ self.store.register_user(
+ user_id=support, password_hash=None, user_type=UserTypes.SUPPORT
+ )
+ )
+
+ # Join the support user to rooms owned by the normal user.
+ public, private = self._create_rooms_and_inject_memberships(
+ user, token, support
+ )
+
+ # Rebuild the directory.
+ self._purge_and_rebuild_user_dir()
+
+ # Check the support user is not in the directory.
+ self._check_room_sharing_tables(user, public, private)
+
+ def test_population_excludes_deactivated_user(self) -> None:
+ user = self.register_user("naughty", "pass")
+ admin = self.register_user("admin", "pass", admin=True)
+ admin_token = self.login(admin, "pass")
+
+ # Deactivate the user.
+ channel = self.make_request(
+ "PUT",
+ f"/_synapse/admin/v2/users/{user}",
+ access_token=admin_token,
+ content={"deactivated": True},
+ )
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.json_body["deactivated"], True)
+
+ # Join the deactivated user to rooms owned by the admin.
+ # Is this something that could actually happen outside of a test?
+ public, private = self._create_rooms_and_inject_memberships(
+ admin, admin_token, user
+ )
+
+ # Rebuild the user dir. The deactivated user should be missing.
+ self._purge_and_rebuild_user_dir()
+ self._check_room_sharing_tables(admin, public, private)
+
+ def test_population_excludes_appservice_user(self) -> None:
+ # Register an AS user.
+ user = self.register_user("user", "pass")
+ token = self.login(user, "pass")
+ as_user = self.register_appservice_user("as_user_potato", self.appservice.token)
+
+ # Join the AS user to rooms owned by the normal user.
+ public, private = self._create_rooms_and_inject_memberships(
+ user, token, as_user
+ )
+
+ # Rebuild the directory.
+ self._purge_and_rebuild_user_dir()
+
+ # Check the AS user is not in the directory.
+ self._check_room_sharing_tables(user, public, private)
+
class UserDirectoryStoreTestCase(HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
diff --git a/tests/unittest.py b/tests/unittest.py
index 1f803564f6..ae393ee53e 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -596,6 +596,35 @@ class HomeserverTestCase(TestCase):
user_id = channel.json_body["user_id"]
return user_id
+ def register_appservice_user(
+ self,
+ username: str,
+ appservice_token: str,
+ ) -> str:
+ """Register an appservice user as an application service.
+ Requires the client-facing registration API be registered.
+
+ Args:
+ username: the user to be registered by an application service.
+ Should be a full username, i.e. "@localpart:hostname" as opposed to just "localpart".
+ appservice_token: the access token for that application service.
+
+ Raises: AssertionError: if the request to '/register' does not return 200 OK.
+
+ Returns: the MXID of the new user.
+ """
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/register",
+ {
+ "username": username,
+ "type": "m.login.application_service",
+ },
+ access_token=appservice_token,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+ return channel.json_body["user_id"]
+
def login(
self,
username,
--
cgit 1.5.1
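A sketch of the caller-side convention the commit message above describes: `should_include_local_user_in_dir` answers only for local users, so callers split on locality first. Everything here except that method name is an assumption for illustration:

import asyncio
from typing import Set

class StubStore:
    """Stand-in for the datastore; the real helper consults the appservice,
    support-user and deactivation tables."""
    def __init__(self, excluded: Set[str]) -> None:
        self._excluded = excluded

    async def should_include_local_user_in_dir(self, user: str) -> bool:
        return user not in self._excluded

def is_mine_id(user_id: str) -> bool:
    # Assume this homeserver's name is "test".
    return user_id.endswith(":test")

async def should_include_in_dir(store: StubStore, user_id: str) -> bool:
    # Exclusion rules only make sense for local users; remote users are
    # always eligible, mirroring the `not is_mine_id(...) or await
    # store.should_include_local_user_in_dir(...)` call sites in the patch.
    if not is_mine_id(user_id):
        return True
    return await store.should_include_local_user_in_dir(user_id)

store = StubStore(excluded={"@support1:test"})
assert asyncio.run(should_include_in_dir(store, "@alice:test"))
assert asyncio.run(should_include_in_dir(store, "@bob:elsewhere"))
assert not asyncio.run(should_include_in_dir(store, "@support1:test"))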
From 2d2c6a41fe69d4dab82a773bbffc52df95b6b542 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Mon, 4 Oct 2021 14:57:40 +0100
Subject: 1.44.0rc3
---
CHANGES.md | 10 ++++++++++
changelog.d/10933.bugfix | 1 -
changelog.d/10968.bugfix | 1 -
debian/changelog | 6 ++++++
synapse/__init__.py | 2 +-
5 files changed, 17 insertions(+), 3 deletions(-)
delete mode 100644 changelog.d/10933.bugfix
delete mode 100644 changelog.d/10968.bugfix
diff --git a/CHANGES.md b/CHANGES.md
index 59ff967633..6c2728d407 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,13 @@
+Synapse 1.44.0rc3 (2021-10-04)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse v1.40.0 where changing a user's display name or avatar in a restricted room would cause an authentication error. ([\#10933](https://github.com/matrix-org/synapse/issues/10933))
+- Fix `/admin/whois/{user_id}` endpoint, which was broken in v1.44.0rc1. ([\#10968](https://github.com/matrix-org/synapse/issues/10968))
+
+
Synapse 1.44.0rc2 (2021-09-30)
==============================
diff --git a/changelog.d/10933.bugfix b/changelog.d/10933.bugfix
deleted file mode 100644
index e0694fea22..0000000000
--- a/changelog.d/10933.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse v1.40.0 where changing a user's display name or avatar in a restricted room would cause an authentication error.
diff --git a/changelog.d/10968.bugfix b/changelog.d/10968.bugfix
deleted file mode 100644
index 76624ed73c..0000000000
--- a/changelog.d/10968.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix `/admin/whois/{user_id}` endpoint, which was broken in v1.44.0rc1.
diff --git a/debian/changelog b/debian/changelog
index b08a592780..a0f1bcbdf9 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.44.0~rc3) stable; urgency=medium
+
+ * New synapse release 1.44.0~rc3.
+
+ -- Synapse Packaging team Mon, 04 Oct 2021 14:57:22 +0100
+
matrix-synapse-py3 (1.44.0~rc2) stable; urgency=medium
* New synapse release 1.44.0~rc2.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 8791c20e26..a9a7b658b7 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -47,7 +47,7 @@ try:
except ImportError:
pass
-__version__ = "1.44.0rc2"
+__version__ = "1.44.0rc3"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
--
cgit 1.5.1
From 30f02404017231ed7e84667f3e1b85e2ed1ae348 Mon Sep 17 00:00:00 2001
From: AndrewFerr
Date: Mon, 4 Oct 2021 10:43:03 -0400
Subject: Make is_public Optional[bool] for create_room_as test util (#10951)
(#10963)
Signed-off-by: Andrew Ferrazzutti
---
changelog.d/10963.misc | 1 +
tests/rest/client/utils.py | 13 +++++++------
2 files changed, 8 insertions(+), 6 deletions(-)
create mode 100644 changelog.d/10963.misc
diff --git a/changelog.d/10963.misc b/changelog.d/10963.misc
new file mode 100644
index 0000000000..daf40155de
--- /dev/null
+++ b/changelog.d/10963.misc
@@ -0,0 +1 @@
+Fix the test utility function `create_room_as` so that `is_public=True` will explicitly set the `visibility` parameter of room creation requests to `public`. Contributed by @AndrewFerr.
diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
index 3075d3f288..71fa87ce92 100644
--- a/tests/rest/client/utils.py
+++ b/tests/rest/client/utils.py
@@ -48,7 +48,7 @@ class RestHelper:
def create_room_as(
self,
room_creator: Optional[str] = None,
- is_public: bool = True,
+ is_public: Optional[bool] = None,
room_version: Optional[str] = None,
tok: Optional[str] = None,
expect_code: int = 200,
@@ -62,9 +62,10 @@ class RestHelper:
Args:
room_creator: The user ID to create the room with.
- is_public: If True, the `visibility` parameter will be set to the
- default (public). Otherwise, the `visibility` parameter will be set
- to "private".
+ is_public: If True, the `visibility` parameter will be set to
+ "public". If False, it will be set to "private". If left
+ unspecified, the server will set it to an appropriate default
+ (which should be "private" as per the CS spec).
room_version: The room version to create the room as. Defaults to Synapse's
default room version.
tok: The access token to use in the request.
@@ -77,8 +78,8 @@ class RestHelper:
self.auth_user_id = room_creator
path = "/_matrix/client/r0/createRoom"
content = extra_content or {}
- if not is_public:
- content["visibility"] = "private"
+ if is_public is not None:
+ content["visibility"] = "public" if is_public else "private"
if room_version:
content["room_version"] = room_version
if tok:
--
cgit 1.5.1
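A sketch of the three behaviours the updated helper now has (assuming a test case with `user` and `token` fixtures; `self.helper` is the RestHelper from the diff above):

    # No "visibility" key is sent; the server applies its default
    # (which should be "private" per the CS spec).
    room_default = self.helper.create_room_as(user, tok=token)

    # The request carries {"visibility": "public"}.
    room_public = self.helper.create_room_as(user, is_public=True, tok=token)

    # The request carries {"visibility": "private"}.
    room_private = self.helper.create_room_as(user, is_public=False, tok=token)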
From eda8c88b84ee7506379a71ac2a7a88c08b759d43 Mon Sep 17 00:00:00 2001
From: Hillery Shay
Date: Mon, 4 Oct 2021 08:34:42 -0700
Subject: Add functionality to remove deactivated users from the
monthly_active_users table (#10947)
* add test
* add function to remove user from monthly active table in deactivate code
* add function to remove user from monthly active table
* add changelog entry
* update changelog number
* requested changes
* update docstring on new function
* fix lint error
* Update synapse/storage/databases/main/monthly_active_users.py
Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
changelog.d/10947.bugfix | 1 +
synapse/handlers/deactivate_account.py | 4 +++
.../storage/databases/main/monthly_active_users.py | 24 ++++++++++++++
tests/test_mau.py | 37 ++++++++++++++++++++--
4 files changed, 63 insertions(+), 3 deletions(-)
create mode 100644 changelog.d/10947.bugfix
diff --git a/changelog.d/10947.bugfix b/changelog.d/10947.bugfix
new file mode 100644
index 0000000000..40c70d3ece
--- /dev/null
+++ b/changelog.d/10947.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug wherein deactivated users still counted towards the monthly active users (MAU) limit.
\ No newline at end of file
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 9ae5b7750e..12bdca7445 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -133,6 +133,10 @@ class DeactivateAccountHandler(BaseHandler):
# delete from user directory
await self.user_directory_handler.handle_local_user_deactivated(user_id)
+ # If the user is present in the monthly active users table
+ # remove them
+ await self.store.remove_deactivated_user_from_mau_table(user_id)
+
# Mark the user as erased, if they asked for that
if erase_data:
user = UserID.from_string(user_id)
diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py
index a14ac03d4b..ec4d47a560 100644
--- a/synapse/storage/databases/main/monthly_active_users.py
+++ b/synapse/storage/databases/main/monthly_active_users.py
@@ -354,3 +354,27 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
await self.upsert_monthly_active_user(user_id)
elif now - last_seen_timestamp > LAST_SEEN_GRANULARITY:
await self.upsert_monthly_active_user(user_id)
+
+ async def remove_deactivated_user_from_mau_table(self, user_id: str) -> None:
+ """
+ Removes a deactivated user from the monthly active user
+ table and resets affected caches.
+
+ Args:
+ user_id(str): the user_id to remove
+ """
+
+ rows_deleted = await self.db_pool.simple_delete(
+ table="monthly_active_users",
+ keyvalues={"user_id": user_id},
+ desc="simple_delete",
+ )
+
+ if rows_deleted != 0:
+ await self.invalidate_cache_and_stream(
+ "user_last_seen_monthly_active", (user_id,)
+ )
+ await self.invalidate_cache_and_stream("get_monthly_active_count", ())
+ await self.invalidate_cache_and_stream(
+ "get_monthly_active_count_by_service", ()
+ )
diff --git a/tests/test_mau.py b/tests/test_mau.py
index 80ab40e255..c683c8937e 100644
--- a/tests/test_mau.py
+++ b/tests/test_mau.py
@@ -13,11 +13,11 @@
# limitations under the License.
"""Tests REST events for /rooms paths."""
-
+import synapse.rest.admin
from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.appservice import ApplicationService
-from synapse.rest.client import register, sync
+from synapse.rest.client import login, profile, register, sync
from tests import unittest
from tests.unittest import override_config
@@ -26,7 +26,13 @@ from tests.utils import default_config
class TestMauLimit(unittest.HomeserverTestCase):
- servlets = [register.register_servlets, sync.register_servlets]
+ servlets = [
+ register.register_servlets,
+ sync.register_servlets,
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ profile.register_servlets,
+ login.register_servlets,
+ ]
def default_config(self):
config = default_config("test")
@@ -229,6 +235,31 @@ class TestMauLimit(unittest.HomeserverTestCase):
self.reactor.advance(100)
self.assertEqual(2, self.successResultOf(count))
+ def test_deactivated_users_dont_count_towards_mau(self):
+ user1 = self.register_user("madonna", "password")
+ self.register_user("prince", "password2")
+ self.register_user("frodo", "onering", True)
+
+ token1 = self.login("madonna", "password")
+ token2 = self.login("prince", "password2")
+ admin_token = self.login("frodo", "onering")
+
+ self.do_sync_for_user(token1)
+ self.do_sync_for_user(token2)
+
+ # Check that mau count is what we expect
+ count = self.get_success(self.store.get_monthly_active_count())
+ self.assertEqual(count, 2)
+
+ # Deactivate user1
+ url = "/_synapse/admin/v1/deactivate/%s" % user1
+ channel = self.make_request("POST", url, access_token=admin_token)
+ self.assertIn("success", channel.json_body["id_server_unbind_result"])
+
+ # Check that deactivated user is no longer counted
+ count = self.get_success(self.store.get_monthly_active_count())
+ self.assertEqual(count, 1)
+
def create_user(self, localpart, token=None, appservice=False):
request_data = {
"username": localpart,
--
cgit 1.5.1
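A minimal sketch of exercising the new storage method directly (assuming the usual `HomeserverTestCase` store fixtures; `upsert_monthly_active_user` and `get_monthly_active_count` both appear in the diff above):

    # Mark a user as monthly active, then simulate the deactivation cleanup.
    self.get_success(self.store.upsert_monthly_active_user("@user:test"))
    self.get_success(
        self.store.remove_deactivated_user_from_mau_table("@user:test")
    )
    # The count caches were invalidated, so the count reflects the deletion.
    count = self.get_success(self.store.get_monthly_active_count())
    self.assertEqual(count, 0)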
From 660c8c1415704f0b9c6fe17fe74d40dfefd78f0a Mon Sep 17 00:00:00 2001
From: Sean Quah <8349537+squahtx@users.noreply.github.com>
Date: Tue, 5 Oct 2021 12:23:25 +0100
Subject: Log stack traces when a missing opentracing span is detected (#10983)
Make it easier to track down where opentracing spans are going missing
by including stack traces in the logs.
---
changelog.d/10983.misc | 1 +
synapse/logging/opentracing.py | 1 +
2 files changed, 2 insertions(+)
create mode 100644 changelog.d/10983.misc
diff --git a/changelog.d/10983.misc b/changelog.d/10983.misc
new file mode 100644
index 0000000000..235899d14f
--- /dev/null
+++ b/changelog.d/10983.misc
@@ -0,0 +1 @@
+Log stack traces when a missing opentracing span is detected.
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 03d2dd94f6..5276c4bfcc 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -339,6 +339,7 @@ def ensure_active_span(message, ret=None):
"There was no active span when trying to %s."
" Did you forget to start one or did a context slip?",
message,
+ stack_info=True,
)
return ret
--
cgit 1.5.1
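The one-line change leans on the standard library's `stack_info` logging flag; a self-contained illustration of what it does:

    import logging

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger(__name__)

    def do_work() -> None:
        # stack_info=True appends the current call stack to the log output,
        # which is how the missing-span warnings gain a traceback.
        logger.warning(
            "There was no active span when trying to %s.", "log", stack_info=True
        )

    do_work()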
From 7036a7a60af54dec2e1ad5e4c31a450817a68147 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Tue, 5 Oct 2021 13:35:19 +0200
Subject: Update links to MSCs in documentation (#10991)
Based on matrix-doc switching from master -> main and
MSCs being merged.
---
changelog.d/10991.doc | 1 +
docs/MSC1711_certificates_FAQ.md | 4 ++--
docs/usage/administration/admin_api/registration_tokens.md | 3 ++-
3 files changed, 5 insertions(+), 3 deletions(-)
create mode 100644 changelog.d/10991.doc
diff --git a/changelog.d/10991.doc b/changelog.d/10991.doc
new file mode 100644
index 0000000000..2f9bb24ca7
--- /dev/null
+++ b/changelog.d/10991.doc
@@ -0,0 +1 @@
+Update links to MSCs in documentation. Contributed by @dklimpel.
\ No newline at end of file
diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md
index 7d71c190ab..086899a9d8 100644
--- a/docs/MSC1711_certificates_FAQ.md
+++ b/docs/MSC1711_certificates_FAQ.md
@@ -3,7 +3,7 @@
## Historical Note
This document was originally written to guide server admins through the upgrade
path towards Synapse 1.0. Specifically,
-[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)
+[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md)
required that all servers present valid TLS certificates on their federation
API. Admins were encouraged to achieve compliance from version 0.99.0 (released
in February 2019) ahead of version 1.0 (released June 2019) enforcing the
@@ -282,7 +282,7 @@ coffin of the Perspectives project (which was already pretty dead). So, the
Spec Core Team decided that a better approach would be to mandate valid TLS
certificates for federation alongside the rest of the Web. More details can be
found in
-[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md#background-the-failure-of-the-perspectives-approach).
+[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md#background-the-failure-of-the-perspectives-approach).
This results in a breaking change, which is disruptive, but absolutely critical
for the security model. However, the existence of Let's Encrypt as a trivial
diff --git a/docs/usage/administration/admin_api/registration_tokens.md b/docs/usage/administration/admin_api/registration_tokens.md
index 828c0277d6..c48d060dcc 100644
--- a/docs/usage/administration/admin_api/registration_tokens.md
+++ b/docs/usage/administration/admin_api/registration_tokens.md
@@ -1,7 +1,8 @@
# Registration Tokens
This API allows you to manage tokens which can be used to authenticate
-registration requests, as proposed in [MSC3231](https://github.com/govynnus/matrix-doc/blob/token-registration/proposals/3231-token-authenticated-registration.md).
+registration requests, as proposed in
+[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md).
To use it, you will need to enable the `registration_requires_token` config
option, and authenticate by providing an `access_token` for a server admin:
see [Admin API](../../usage/administration/admin_api).
--
cgit 1.5.1
From 6f6e9563387124eb4b4f324e4e1720291015a458 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 5 Oct 2021 12:43:04 +0100
Subject: Run CI with Python 3.10 and Postgres 14 (#10992)
---
.github/workflows/tests.yml | 14 +++++++-------
changelog.d/10992.misc | 1 +
2 files changed, 8 insertions(+), 7 deletions(-)
create mode 100644 changelog.d/10992.misc
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index fa9c5e036a..96c39dd9a4 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -76,11 +76,11 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ["3.6", "3.7", "3.8", "3.9"]
+ python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"]
database: ["sqlite"]
include:
# Newest Python without optional deps
- - python-version: "3.9"
+ - python-version: "3.10"
toxenv: "py-noextras,combine"
# Oldest Python with PostgreSQL
@@ -88,10 +88,10 @@ jobs:
database: "postgres"
postgres-version: "9.6"
- # Newest Python with PostgreSQL
- - python-version: "3.9"
+ # Newest Python with newest PostgreSQL
+ - python-version: "3.10"
database: "postgres"
- postgres-version: "13"
+ postgres-version: "14"
steps:
- uses: actions/checkout@v2
@@ -256,8 +256,8 @@ jobs:
- python-version: "3.6"
postgres-version: "9.6"
- - python-version: "3.9"
- postgres-version: "13"
+ - python-version: "3.10"
+ postgres-version: "14"
services:
postgres:
diff --git a/changelog.d/10992.misc b/changelog.d/10992.misc
new file mode 100644
index 0000000000..60432a559c
--- /dev/null
+++ b/changelog.d/10992.misc
@@ -0,0 +1 @@
+Update GHA config to run tests against Python 3.10 and PostgreSQL 14.
--
cgit 1.5.1
From cb88ed912b3e984e0a409e4e5fd3c22817a4840d Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 5 Oct 2021 12:50:07 +0100
Subject: `_check_event_auth`: move event validation earlier (#10988)
There's little point in doing a fancy state reconciliation dance if the event
itself is invalid.
Likewise, there's no point checking it again in `_check_for_soft_fail`.
---
changelog.d/10988.misc | 1 +
synapse/handlers/federation_event.py | 13 +++++++++----
2 files changed, 10 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/10988.misc
diff --git a/changelog.d/10988.misc b/changelog.d/10988.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/10988.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index e587b5b3b3..5938654338 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -1250,9 +1250,18 @@ class FederationEventHandler:
# This method should only be used for non-outliers
assert not event.internal_metadata.outlier
+ # first of all, check that the event itself is valid.
room_version = await self._store.get_room_version_id(event.room_id)
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+ try:
+ validate_event_for_room_version(room_version_obj, event)
+ except AuthError as e:
+ logger.warning("While validating received event %r: %s", event, e)
+ # TODO: use a different rejected reason here?
+ context.rejected = RejectedReason.AUTH_ERROR
+ return context
+
# calculate what the auth events *should* be, to use as a basis for auth.
prev_state_ids = await context.get_prev_state_ids()
auth_events_ids = self._event_auth_handler.compute_auth_events(
@@ -1286,7 +1295,6 @@ class FederationEventHandler:
auth_events_for_auth = calculated_auth_event_map
try:
- validate_event_for_room_version(room_version_obj, event)
check_auth_rules_for_event(room_version_obj, event, auth_events_for_auth)
except AuthError as e:
logger.warning("Failed auth resolution for %r because %s", event, e)
@@ -1399,9 +1407,6 @@ class FederationEventHandler:
}
try:
- # TODO: skip the call to validate_event_for_room_version? we should already
- # have validated the event.
- validate_event_for_room_version(room_version_obj, event)
check_auth_rules_for_event(room_version_obj, event, current_auth_events)
except AuthError as e:
logger.warning(
--
cgit 1.5.1
From d099535deb5be31891719c61c3757c5150829053 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 5 Oct 2021 12:50:38 +0100
Subject: `_update_auth_events_and_context_for_auth`: add some comments
(#10987)
Add some more comments about wtf is going on here.
---
changelog.d/10987.misc | 1 +
synapse/handlers/federation_event.py | 26 ++++++++++++++++++++++++++
2 files changed, 27 insertions(+)
create mode 100644 changelog.d/10987.misc
diff --git a/changelog.d/10987.misc b/changelog.d/10987.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/10987.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 5938654338..aa20d75550 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -1476,6 +1476,11 @@ class FederationEventHandler:
logger.debug("Events %s are in the store", have_events)
missing_auth.difference_update(have_events)
+ # missing_auth is now the set of event_ids which:
+ # a. are listed in event.auth_events, *and*
+ # b. are *not* part of our calculated auth events based on room state, *and*
+ # c. are *not* yet in our database.
+
if missing_auth:
# If we don't have all the auth events, we need to get them.
logger.info("auth_events contains unknown events: %s", missing_auth)
@@ -1497,10 +1502,31 @@ class FederationEventHandler:
}
)
+ # auth_events now contains
+ # 1. our *calculated* auth events based on the room state, plus:
+ # 2. any events which:
+ # a. are listed in `event.auth_events`, *and*
+ # b. are not part of our calculated auth events, *and*
+ # c. were not in our database before the call to /event_auth
+ # d. have since been added to our database (most likely by /event_auth).
+
different_auth = event_auth_events.difference(
e.event_id for e in auth_events.values()
)
+ # different_auth is the set of events which *are* in `event.auth_events`, but
+ # which are *not* in `auth_events`. Comparing with (2.) above, this means
+ # exclusively the set of `event.auth_events` which we already had in our
+ # database before any call to /event_auth.
+ #
+ # I'm reasonably sure that the fact that events returned by /event_auth are
+ # blindly added to auth_events (and hence excluded from different_auth) is a bug
+ # - though it's a very long-standing one (see
+ # https://github.com/matrix-org/synapse/commit/78015948a7febb18e000651f72f8f58830a55b93#diff-0bc92da3d703202f5b9be2d3f845e375f5b1a6bc6ba61705a8af9be1121f5e42R786
+ # from Jan 2015 which seems to add it, though it actually just moves it from
+ # elsewhere (before that, it gets lost in a mess of huge "various bug fixes"
+ # PRs).
+
if not different_auth:
return context, auth_events
--
cgit 1.5.1
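The set algebra spelled out by the new comments, as a toy illustration (the event IDs here are made up):

    # event.auth_events as claimed by the received event:
    event_auth_events = {"$a", "$b", "$c"}
    # our *calculated* auth events, based on the room state:
    calculated = {"$a"}
    # events fetched via /event_auth because we didn't have them:
    fetched = {"$c"}

    auth_events = calculated | fetched
    different_auth = event_auth_events.difference(auth_events)
    print(different_auth)  # {'$b'}: claimed auth events we already had locally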
From 787af4a1062fecc350fa14fe2abfc0e9d2f1555e Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 5 Oct 2021 13:01:41 +0100
Subject: Hoist `cache_joined_hosts_for_event` to caller (#10986)
`_check_event_auth` is only called in two places, and only one of those sets
`send_on_behalf_of`. Warming the cache isn't really part of auth anyway, so
moving it out makes a lot more sense.
---
changelog.d/10986.misc | 1 +
synapse/handlers/federation_event.py | 18 ++++++++----------
2 files changed, 9 insertions(+), 10 deletions(-)
create mode 100644 changelog.d/10986.misc
diff --git a/changelog.d/10986.misc b/changelog.d/10986.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/10986.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index aa20d75550..9269cb444d 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -356,6 +356,11 @@ class FederationEventHandler:
)
# all looks good, we can persist the event.
+
+ # First, precalculate the joined hosts so that the federation sender doesn't
+ # need to.
+ await self._event_creation_handler.cache_joined_hosts_for_event(event, context)
+
await self._run_push_actions_and_persist_event(event, context)
return event, context
@@ -1299,17 +1304,10 @@ class FederationEventHandler:
except AuthError as e:
logger.warning("Failed auth resolution for %r because %s", event, e)
context.rejected = RejectedReason.AUTH_ERROR
+ return context
- if not context.rejected:
- await self._check_for_soft_fail(event, state, backfilled, origin=origin)
- await self._maybe_kick_guest_users(event)
-
- # If we are going to send this event over federation we precaclculate
- # the joined hosts.
- if event.internal_metadata.get_send_on_behalf_of():
- await self._event_creation_handler.cache_joined_hosts_for_event(
- event, context
- )
+ await self._check_for_soft_fail(event, state, backfilled, origin=origin)
+ await self._maybe_kick_guest_users(event)
return context
--
cgit 1.5.1
From 3a5b0cbe7ade000245695ec97c13ab5cb3565dc2 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 5 Oct 2021 13:23:29 +0100
Subject: Ensure that we reject events which use rejected events for auth
(#10956)
When we consider whether to accept events, we should not accept those which
depend on rejected events for their auth events.
This (together with earlier changes such as
https://github.com/matrix-org/synapse/pull/10771 and
https://github.com/matrix-org/synapse/pull/10896) forms a partial fix to
https://github.com/matrix-org/synapse/issues/9595. There still remain code
paths where we do not check the `auth_events` at all.
---
changelog.d/10956.bugfix | 1 +
synapse/event_auth.py | 6 ++++++
2 files changed, 7 insertions(+)
create mode 100644 changelog.d/10956.bugfix
diff --git a/changelog.d/10956.bugfix b/changelog.d/10956.bugfix
new file mode 100644
index 0000000000..13b8e5983b
--- /dev/null
+++ b/changelog.d/10956.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state.
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 7a1adc2750..ca0293a3dc 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -155,6 +155,12 @@ def check_auth_rules_for_event(
"which is in room %s"
% (event.event_id, room_id, auth_event.event_id, auth_event.room_id),
)
+ if auth_event.rejected_reason:
+ raise AuthError(
+ 403,
+ "During auth for event %s: found rejected event %s in the state"
+ % (event.event_id, auth_event.event_id),
+ )
# Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules
#
--
cgit 1.5.1
From b2c5e79291b9f93cdb39c9a6f7de50e62f45e64e Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Tue, 5 Oct 2021 13:45:24 +0100
Subject: 1.44.0
---
CHANGES.md | 6 ++++++
debian/changelog | 6 ++++++
synapse/__init__.py | 2 +-
3 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 6c2728d407..3f048ba881 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,9 @@
+Synapse 1.44.0 (2021-10-05)
+===========================
+
+No significant changes since 1.44.0rc3.
+
+
Synapse 1.44.0rc3 (2021-10-04)
==============================
diff --git a/debian/changelog b/debian/changelog
index a0f1bcbdf9..9e878fbc2d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.44.0) stable; urgency=medium
+
+ * New synapse release 1.44.0.
+
+ -- Synapse Packaging team Tue, 05 Oct 2021 13:43:57 +0100
+
matrix-synapse-py3 (1.44.0~rc3) stable; urgency=medium
* New synapse release 1.44.0~rc3.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index a9a7b658b7..b8979c365e 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -47,7 +47,7 @@ try:
except ImportError:
pass
-__version__ = "1.44.0rc3"
+__version__ = "1.44.0"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
--
cgit 1.5.1
From 392863fbf1ee31f8a1997446ab31919a7b6d9a14 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 5 Oct 2021 11:51:57 -0500
Subject: Fix logic flaw preventing tracking of MSC2716 events in existing room
versions (#10962)
We correctly allowed using the MSC2716 batch endpoint for
the room creator in existing room versions but accidentally didn't track
the events because of a logic flaw.
This prevented subsequent batches from being connected together, because the
endpoint would throw an unknown batch ID error.
We only want to process MSC2716 events when either:
- The room version supports MSC2716, or
- The homeserver has the `msc2716_enabled` experimental feature enabled and the event is from the room creator
---
changelog.d/10962.bugfix | 1 +
synapse/handlers/federation_event.py | 5 ++---
synapse/storage/databases/main/events.py | 10 ++++------
3 files changed, 7 insertions(+), 9 deletions(-)
create mode 100644 changelog.d/10962.bugfix
diff --git a/changelog.d/10962.bugfix b/changelog.d/10962.bugfix
new file mode 100644
index 0000000000..9b0760d731
--- /dev/null
+++ b/changelog.d/10962.bugfix
@@ -0,0 +1 @@
+Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint rejecting subsequent batches with unknown batch ID error in existing room versions from the room creator.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 9269cb444d..243be46267 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -1015,9 +1015,8 @@ class FederationEventHandler:
room_version = await self._store.get_room_version(marker_event.room_id)
create_event = await self._store.get_create_event_for_room(marker_event.room_id)
room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
- if (
- not room_version.msc2716_historical
- or not self._config.experimental.msc2716_enabled
+ if not room_version.msc2716_historical and (
+ not self._config.experimental.msc2716_enabled
or marker_event.sender != room_creator
):
return
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index bc7d213fe2..19f55c19c5 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1763,9 +1763,8 @@ class PersistEventsStore:
retcol="creator",
allow_none=True,
)
- if (
- not room_version.msc2716_historical
- or not self.hs.config.experimental.msc2716_enabled
+ if not room_version.msc2716_historical and (
+ not self.hs.config.experimental.msc2716_enabled
or event.sender != room_creator
):
return
@@ -1825,9 +1824,8 @@ class PersistEventsStore:
retcol="creator",
allow_none=True,
)
- if (
- not room_version.msc2716_historical
- or not self.hs.config.experimental.msc2716_enabled
+ if not room_version.msc2716_historical and (
+ not self.hs.config.experimental.msc2716_enabled
or event.sender != room_creator
):
return
--
cgit 1.5.1
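A quick way to see exactly what the refactored guard changes is to enumerate all inputs and print where the old and new conditions disagree (a throwaway check, not part of the patch):

    from itertools import product

    def old_guard(historical: bool, enabled: bool, from_creator: bool) -> bool:
        # True means "return early", i.e. skip MSC2716 tracking.
        return not historical or not enabled or not from_creator

    def new_guard(historical: bool, enabled: bool, from_creator: bool) -> bool:
        return not historical and (not enabled or not from_creator)

    for h, e, c in product([True, False], repeat=3):
        if old_guard(h, e, c) != new_guard(h, e, c):
            print(h, e, c)

    # Prints only rows with historical=True: the new guard keeps tracking
    # events in MSC2716-capable room versions even when the experimental
    # flag is off or the sender isn't the room creator.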
From 4f00432ce1a5571dd43f9ddc3ae128c58ae4d063 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 5 Oct 2021 18:35:25 +0100
Subject: Fix potential leak of per-room profiles when the user dir is rebuilt.
(#10981)
There are two steps to rebuilding the user directory:
1. a scan over rooms, followed by
2. a scan over local users.
The former reads avatars and display names from the `room_memberships`
table and therefore contains potentially private avatars and
display names. The latter reads from the `profiles` table, which only
contains public data; moreover it will overwrite any private profiles
that the rooms scan may have written to the user directory. This means
that the rebuild could leak private user data while the rebuild was in
progress, only to later cover up the leaks once the rebuild had completed.
This change skips over local users when writing user_directory rows
during the scan over rooms. Doing so means that it'll take longer for a rebuild
to make local users searchable, which is unfortunate. I think a future
PR can improve this by swapping the order of the two steps above. (And
indeed there's more to do here, e.g. copying from `profiles` without
going via Python.)
Small tidy-ups while I'm here:
* Remove duplicated code from test_initial. This was meant to be pulled into `purge_and_rebuild_user_dir`.
* Move the `is_public` lookup to just before the sharing-table updates. No functional change; it is still computed before its first read.
* Don't bother creating a set from dict keys. Slightly nicer and makes the code simpler.
Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
changelog.d/10981.bugfix | 1 +
synapse/storage/databases/main/user_directory.py | 33 +++++----
tests/storage/test_user_directory.py | 94 ++++++++++++++++++++----
3 files changed, 99 insertions(+), 29 deletions(-)
create mode 100644 changelog.d/10981.bugfix
diff --git a/changelog.d/10981.bugfix b/changelog.d/10981.bugfix
new file mode 100644
index 0000000000..d7bf660348
--- /dev/null
+++ b/changelog.d/10981.bugfix
@@ -0,0 +1 @@
+Fix a bug that could leak local users' per-room nicknames and avatars when the user directory is rebuilt.
\ No newline at end of file
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 5f538947ec..5c713a732e 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -228,10 +228,6 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
is_in_room = await self.is_host_joined(room_id, self.server_name)
if is_in_room:
- is_public = await self.is_room_world_readable_or_publicly_joinable(
- room_id
- )
-
users_with_profile = await self.get_users_in_room_with_profiles(room_id)
# Throw away users excluded from the directory.
users_with_profile = {
@@ -241,22 +237,33 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
or await self.should_include_local_user_in_dir(user_id)
}
- # Update each user in the user directory.
+ # Upsert a user_directory record for each remote user we see.
for user_id, profile in users_with_profile.items():
+ # Local users are processed separately in
+ # `_populate_user_directory_users`; there we can read from
+ # the `profiles` table to ensure we don't leak their per-room
+ # profiles. It also means we write local users to this table
+ # exactly once, rather than once for every room they're in.
+ if self.hs.is_mine_id(user_id):
+ continue
+ # TODO `users_with_profile` above reads from the `user_directory`
+ # table, meaning that `profile` is bespoke to this room, and
+ # this leaks remote users' per-room profiles to the user directory.
await self.update_profile_in_user_dir(
user_id, profile.display_name, profile.avatar_url
)
- to_insert = set()
-
+ # Now update the room sharing tables to include this room.
+ is_public = await self.is_room_world_readable_or_publicly_joinable(
+ room_id
+ )
if is_public:
- for user_id in users_with_profile:
- to_insert.add(user_id)
-
- if to_insert:
- await self.add_users_in_public_rooms(room_id, to_insert)
- to_insert.clear()
+ if users_with_profile:
+ await self.add_users_in_public_rooms(
+ room_id, users_with_profile.keys()
+ )
else:
+ to_insert = set()
for user_id in users_with_profile:
# We want the set of pairs (L, M) where L and M are
# in `users_with_profile` and L is local.
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index 6884ca9b7a..fddfb8db28 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -11,17 +11,19 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Dict, List, Set, Tuple
+from typing import Any, Dict, List, Set, Tuple
+from unittest import mock
from unittest.mock import Mock, patch
from twisted.test.proto_helpers import MemoryReactor
-from synapse.api.constants import UserTypes
+from synapse.api.constants import EventTypes, Membership, UserTypes
from synapse.appservice import ApplicationService
from synapse.rest import admin
from synapse.rest.client import login, register, room
from synapse.server import HomeServer
from synapse.storage import DataStore
+from synapse.storage.roommember import ProfileInfo
from synapse.util import Clock
from tests.test_utils.event_injection import inject_member_event
@@ -52,6 +54,11 @@ class GetUserDirectoryTables:
return r
async def get_users_in_public_rooms(self) -> List[Tuple[str, str]]:
+ """Fetch the entire `users_in_public_rooms` table.
+
+ Returns a list of (user_id, room_id) tuples, where each room_id is a
+ public room containing the user with that user_id.
+ """
r = await self.store.db_pool.simple_select_list(
"users_in_public_rooms", None, ("user_id", "room_id")
)
@@ -62,6 +69,13 @@ class GetUserDirectoryTables:
return retval
async def get_users_who_share_private_rooms(self) -> List[Dict[str, str]]:
+ """Fetch the entire `users_who_share_private_rooms` table.
+
+ Returns a list of dicts, each containing "user_id", "other_user_id"
+ and "room_id" keys. The dicts can be flattened to tuples with the
+ `_compress_shared` method. (This seems a little awkward; maybe we
+ could clean this up.)
+ """
+
return await self.store.db_pool.simple_select_list(
"users_who_share_private_rooms",
None,
@@ -69,6 +83,10 @@ class GetUserDirectoryTables:
)
async def get_users_in_user_directory(self) -> Set[str]:
+ """Fetch the set of users in the `user_directory` table.
+
+ This is useful when checking we've correctly excluded users from the directory.
+ """
result = await self.store.db_pool.simple_select_list(
"user_directory",
None,
@@ -76,6 +94,25 @@ class GetUserDirectoryTables:
)
return {row["user_id"] for row in result}
+ async def get_profiles_in_user_directory(self) -> Dict[str, ProfileInfo]:
+ """Fetch users and their profiles from the `user_directory` table.
+
+ This is useful when we want to inspect display names and avatars.
+ It's almost the entire contents of the `user_directory` table: the only
+ thing missing is an unused room_id column.
+ """
+ rows = await self.store.db_pool.simple_select_list(
+ "user_directory",
+ None,
+ ("user_id", "display_name", "avatar_url"),
+ )
+ return {
+ row["user_id"]: ProfileInfo(
+ display_name=row["display_name"], avatar_url=row["avatar_url"]
+ )
+ for row in rows
+ }
+
class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
"""Ensure that rebuilding the directory writes the correct data to the DB.
@@ -201,20 +238,6 @@ class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
self.helper.invite(private_room, src=u1, targ=u3, tok=u1_token)
self.helper.join(private_room, user=u3, tok=u3_token)
- self.get_success(self.store.update_user_directory_stream_pos(None))
- self.get_success(self.store.delete_all_from_user_dir())
-
- shares_private = self.get_success(
- self.user_dir_helper.get_users_who_share_private_rooms()
- )
- public_users = self.get_success(
- self.user_dir_helper.get_users_in_public_rooms()
- )
-
- # Nothing updated yet
- self.assertEqual(shares_private, [])
- self.assertEqual(public_users, [])
-
# Do the initial population of the user directory via the background update
self._purge_and_rebuild_user_dir()
@@ -346,6 +369,45 @@ class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
# Check the AS user is not in the directory.
self._check_room_sharing_tables(user, public, private)
+ def test_population_conceals_private_nickname(self) -> None:
+ # Make a private room, and set a nickname within
+ user = self.register_user("aaaa", "pass")
+ user_token = self.login(user, "pass")
+ private_room = self.helper.create_room_as(user, is_public=False, tok=user_token)
+ self.helper.send_state(
+ private_room,
+ EventTypes.Member,
+ state_key=user,
+ body={"membership": Membership.JOIN, "displayname": "BBBB"},
+ tok=user_token,
+ )
+
+ # Rebuild the user directory. Make the rescan of the `users` table a no-op
+ # so we only see the effect of scanning the `room_memberships` table.
+ async def mocked_process_users(*args: Any, **kwargs: Any) -> int:
+ await self.store.db_pool.updates._end_background_update(
+ "populate_user_directory_process_users"
+ )
+ return 1
+
+ with mock.patch.dict(
+ self.store.db_pool.updates._background_update_handlers,
+ populate_user_directory_process_users=mocked_process_users,
+ ):
+ self._purge_and_rebuild_user_dir()
+
+ # Local users are ignored by the scan over rooms
+ users = self.get_success(self.user_dir_helper.get_profiles_in_user_directory())
+ self.assertEqual(users, {})
+
+ # Do a full rebuild including the scan over the `users` table. The local
+ # user should appear with their profile name.
+ self._purge_and_rebuild_user_dir()
+ users = self.get_success(self.user_dir_helper.get_profiles_in_user_directory())
+ self.assertEqual(
+ users, {user: ProfileInfo(display_name="aaaa", avatar_url=None)}
+ )
+
class UserDirectoryStoreTestCase(HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
--
cgit 1.5.1
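The new test relies on `unittest.mock.patch.dict`, which swaps entries in a mapping for the duration of the `with` block and restores the original contents afterwards; a self-contained illustration:

    from unittest import mock

    handlers = {"populate_user_directory_process_users": "real_handler"}
    with mock.patch.dict(
        handlers, populate_user_directory_process_users="stub"
    ):
        print(handlers)  # {'populate_user_directory_process_users': 'stub'}
    print(handlers)      # original mapping restored on exit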
From 6744273f0b92457247166299981d69e9f4219601 Mon Sep 17 00:00:00 2001
From: Max Kratz
Date: Wed, 6 Oct 2021 12:05:07 +0200
Subject: Remove "reference" wording regarding the Synapse homeserver (#10971)
---
README.rst | 7 ++-----
changelog.d/10971.doc | 1 +
docs/README.md | 6 +++---
docs/welcome_and_overview.md | 5 +++--
4 files changed, 9 insertions(+), 10 deletions(-)
create mode 100644 changelog.d/10971.doc
diff --git a/README.rst b/README.rst
index 524a3a5142..63deb06eac 100644
--- a/README.rst
+++ b/README.rst
@@ -55,11 +55,8 @@ solutions. The hope is for Matrix to act as the building blocks for a new
generation of fully open and interoperable messaging and VoIP apps for the
internet.
-Synapse is a reference "homeserver" implementation of Matrix from the core
-development team at matrix.org, written in Python/Twisted. It is intended to
-showcase the concept of Matrix and let folks see the spec in the context of a
-codebase and let you run your own homeserver and generally help bootstrap the
-ecosystem.
+Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
+team, written in Python 3/Twisted.
In Matrix, every user runs one or more Matrix clients, which connect through to
a Matrix homeserver. The homeserver stores all their personal chat history and
diff --git a/changelog.d/10971.doc b/changelog.d/10971.doc
new file mode 100644
index 0000000000..cc6cfe4164
--- /dev/null
+++ b/changelog.d/10971.doc
@@ -0,0 +1 @@
+Change wording ("reference homeserver") in Synapse repository documentation. Contributed by @maxkratz.
diff --git a/docs/README.md b/docs/README.md
index e113f55d2a..6d70f5afff 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -6,9 +6,9 @@ Please update any links to point to the new website instead.
## About
This directory currently holds a series of markdown files documenting how to install, use
-and develop Synapse, the reference Matrix homeserver. The documentation is readable directly
-from this repository, but it is recommended to instead browse through the
-[website](https://matrix-org.github.io/synapse) for easier discoverability.
+and develop Synapse. The documentation is readable directly from this repository, but it is
+recommended to instead browse through the [website](https://matrix-org.github.io/synapse) for
+easier discoverability.
## Adding to the documentation
diff --git a/docs/welcome_and_overview.md b/docs/welcome_and_overview.md
index 30e75984d1..9882d9f159 100644
--- a/docs/welcome_and_overview.md
+++ b/docs/welcome_and_overview.md
@@ -1,4 +1,5 @@
# Introduction
-Welcome to the documentation repository for Synapse, the reference
-[Matrix](https://matrix.org) homeserver implementation.
\ No newline at end of file
+Welcome to the documentation repository for Synapse, a
+[Matrix](https://matrix.org) homeserver implementation developed by the matrix.org core
+team.
--
cgit 1.5.1
From f8d0f72b27e158738f3c75a38399b967f7478011 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Wed, 6 Oct 2021 11:20:49 +0100
Subject: More types for synapse.util, part 1 (#10888)
The following modules now pass `disallow_untyped_defs`:
* synapse.util.caches.cached_call
* synapse.util.caches.lrucache
* synapse.util.caches.response_cache
* synapse.util.caches.stream_change_cache
* synapse.util.caches.ttlcache
* synapse.util.daemonize
* synapse.util.patch_inline_callbacks
* synapse.util.versionstring
Additional typing in synapse.util.metrics. Didn't get this to pass `no-untyped-defs`; I think I'll need to watch #10847.
---
changelog.d/10888.misc | 1 +
mypy.ini | 24 +++++++++++++
synapse/util/caches/cached_call.py | 2 +-
synapse/util/caches/deferred_cache.py | 11 +++---
synapse/util/caches/lrucache.py | 57 ++++++++++++++----------------
synapse/util/caches/response_cache.py | 6 ++--
synapse/util/caches/stream_change_cache.py | 6 ++--
synapse/util/caches/ttlcache.py | 12 +++----
synapse/util/daemonize.py | 8 +++--
synapse/util/metrics.py | 27 ++++++++++----
synapse/util/patch_inline_callbacks.py | 28 ++++++++++-----
synapse/util/versionstring.py | 25 +++++++++----
12 files changed, 134 insertions(+), 73 deletions(-)
create mode 100644 changelog.d/10888.misc
diff --git a/changelog.d/10888.misc b/changelog.d/10888.misc
new file mode 100644
index 0000000000..d9c9917881
--- /dev/null
+++ b/changelog.d/10888.misc
@@ -0,0 +1 @@
+Improve type hinting in `synapse.util`.
\ No newline at end of file
diff --git a/mypy.ini b/mypy.ini
index 568166db33..86459bdcb6 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -102,9 +102,27 @@ disallow_untyped_defs = True
[mypy-synapse.util.batching_queue]
disallow_untyped_defs = True
+[mypy-synapse.util.caches.cached_call]
+disallow_untyped_defs = True
+
[mypy-synapse.util.caches.dictionary_cache]
disallow_untyped_defs = True
+[mypy-synapse.util.caches.lrucache]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.caches.response_cache]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.caches.stream_change_cache]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.caches.ttlcache]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.daemonize]
+disallow_untyped_defs = True
+
[mypy-synapse.util.file_consumer]
disallow_untyped_defs = True
@@ -141,6 +159,9 @@ disallow_untyped_defs = True
[mypy-synapse.util.msisdn]
disallow_untyped_defs = True
+[mypy-synapse.util.patch_inline_callbacks]
+disallow_untyped_defs = True
+
[mypy-synapse.util.ratelimitutils]
disallow_untyped_defs = True
@@ -162,6 +183,9 @@ disallow_untyped_defs = True
[mypy-synapse.util.wheel_timer]
disallow_untyped_defs = True
+[mypy-synapse.util.versionstring]
+disallow_untyped_defs = True
+
[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True
diff --git a/synapse/util/caches/cached_call.py b/synapse/util/caches/cached_call.py
index e58dd91eda..470f4f91a5 100644
--- a/synapse/util/caches/cached_call.py
+++ b/synapse/util/caches/cached_call.py
@@ -85,7 +85,7 @@ class CachedCall(Generic[TV]):
# result in the deferred, since `awaiting` a deferred destroys its result.
# (Also, if it's a Failure, GCing the deferred would log a critical error
# about unhandled Failures)
- def got_result(r):
+ def got_result(r: Union[TV, Failure]) -> None:
self._result = r
self._deferred.addBoth(got_result)
diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py
index 6262efe072..da502aec11 100644
--- a/synapse/util/caches/deferred_cache.py
+++ b/synapse/util/caches/deferred_cache.py
@@ -31,6 +31,7 @@ from prometheus_client import Gauge
from twisted.internet import defer
from twisted.python import failure
+from twisted.python.failure import Failure
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.lrucache import LruCache
@@ -112,7 +113,7 @@ class DeferredCache(Generic[KT, VT]):
self.thread: Optional[threading.Thread] = None
@property
- def max_entries(self):
+ def max_entries(self) -> int:
return self.cache.max_size
def check_thread(self) -> None:
@@ -258,7 +259,7 @@ class DeferredCache(Generic[KT, VT]):
return False
- def cb(result) -> None:
+ def cb(result: VT) -> None:
if compare_and_pop():
self.cache.set(key, result, entry.callbacks)
else:
@@ -270,7 +271,7 @@ class DeferredCache(Generic[KT, VT]):
# not have been. Either way, let's double-check now.
entry.invalidate()
- def eb(_fail) -> None:
+ def eb(_fail: Failure) -> None:
compare_and_pop()
entry.invalidate()
@@ -284,11 +285,11 @@ class DeferredCache(Generic[KT, VT]):
def prefill(
self, key: KT, value: VT, callback: Optional[Callable[[], None]] = None
- ):
+ ) -> None:
callbacks = [callback] if callback else []
self.cache.set(key, value, callbacks=callbacks)
- def invalidate(self, key):
+ def invalidate(self, key) -> None:
"""Delete a key, or tree of entries
If the cache is backed by a regular dict, then "key" must be of
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index 4ff62b403f..a0a7a9de32 100644
--- a/synapse/util/caches/lrucache.py
+++ b/synapse/util/caches/lrucache.py
@@ -52,7 +52,7 @@ logger = logging.getLogger(__name__)
try:
from pympler.asizeof import Asizer
- def _get_size_of(val: Any, *, recurse=True) -> int:
+ def _get_size_of(val: Any, *, recurse: bool = True) -> int:
"""Get an estimate of the size in bytes of the object.
Args:
@@ -71,7 +71,7 @@ try:
except ImportError:
- def _get_size_of(val: Any, *, recurse=True) -> int:
+ def _get_size_of(val: Any, *, recurse: bool = True) -> int:
return 0
@@ -85,15 +85,6 @@ VT = TypeVar("VT")
# a general type var, distinct from either KT or VT
T = TypeVar("T")
-
-def enumerate_leaves(node, depth):
- if depth == 0:
- yield node
- else:
- for n in node.values():
- yield from enumerate_leaves(n, depth - 1)
-
-
P = TypeVar("P")
@@ -102,7 +93,7 @@ class _TimedListNode(ListNode[P]):
__slots__ = ["last_access_ts_secs"]
- def update_last_access(self, clock: Clock):
+ def update_last_access(self, clock: Clock) -> None:
self.last_access_ts_secs = int(clock.time())
@@ -115,7 +106,7 @@ GLOBAL_ROOT = ListNode["_Node"].create_root_node()
@wrap_as_background_process("LruCache._expire_old_entries")
-async def _expire_old_entries(clock: Clock, expiry_seconds: int):
+async def _expire_old_entries(clock: Clock, expiry_seconds: int) -> None:
"""Walks the global cache list to find cache entries that haven't been
accessed in the given number of seconds.
"""
@@ -163,7 +154,7 @@ async def _expire_old_entries(clock: Clock, expiry_seconds: int):
logger.info("Dropped %d items from caches", i)
-def setup_expire_lru_cache_entries(hs: "HomeServer"):
+def setup_expire_lru_cache_entries(hs: "HomeServer") -> None:
"""Start a background job that expires all cache entries if they have not
been accessed for the given number of seconds.
"""
@@ -183,7 +174,7 @@ def setup_expire_lru_cache_entries(hs: "HomeServer"):
)
-class _Node:
+class _Node(Generic[KT, VT]):
__slots__ = [
"_list_node",
"_global_list_node",
@@ -197,8 +188,8 @@ class _Node:
def __init__(
self,
root: "ListNode[_Node]",
- key,
- value,
+ key: KT,
+ value: VT,
cache: "weakref.ReferenceType[LruCache]",
clock: Clock,
callbacks: Collection[Callable[[], None]] = (),
@@ -409,7 +400,7 @@ class LruCache(Generic[KT, VT]):
def synchronized(f: FT) -> FT:
@wraps(f)
- def inner(*args, **kwargs):
+ def inner(*args: Any, **kwargs: Any) -> Any:
with lock:
return f(*args, **kwargs)
@@ -418,17 +409,19 @@ class LruCache(Generic[KT, VT]):
cached_cache_len = [0]
if size_callback is not None:
- def cache_len():
+ def cache_len() -> int:
return cached_cache_len[0]
else:
- def cache_len():
+ def cache_len() -> int:
return len(cache)
self.len = synchronized(cache_len)
- def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()):
+ def add_node(
+ key: KT, value: VT, callbacks: Collection[Callable[[], None]] = ()
+ ) -> None:
node = _Node(
list_root,
key,
@@ -446,7 +439,7 @@ class LruCache(Generic[KT, VT]):
if caches.TRACK_MEMORY_USAGE and metrics:
metrics.inc_memory_usage(node.memory)
- def move_node_to_front(node: _Node):
+ def move_node_to_front(node: _Node) -> None:
node.move_to_front(real_clock, list_root)
def delete_node(node: _Node) -> int:
@@ -488,7 +481,7 @@ class LruCache(Generic[KT, VT]):
default: Optional[T] = None,
callbacks: Collection[Callable[[], None]] = (),
update_metrics: bool = True,
- ):
+ ) -> Union[None, T, VT]:
node = cache.get(key, None)
if node is not None:
move_node_to_front(node)
@@ -502,7 +495,9 @@ class LruCache(Generic[KT, VT]):
return default
@synchronized
- def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()):
+ def cache_set(
+ key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()
+ ) -> None:
node = cache.get(key, None)
if node is not None:
# We sometimes store large objects, e.g. dicts, which cause
@@ -547,7 +542,7 @@ class LruCache(Generic[KT, VT]):
...
@synchronized
- def cache_pop(key: KT, default: Optional[T] = None):
+ def cache_pop(key: KT, default: Optional[T] = None) -> Union[None, T, VT]:
node = cache.get(key, None)
if node:
delete_node(node)
@@ -612,25 +607,25 @@ class LruCache(Generic[KT, VT]):
self.contains = cache_contains
self.clear = cache_clear
- def __getitem__(self, key):
+ def __getitem__(self, key: KT) -> VT:
result = self.get(key, self.sentinel)
if result is self.sentinel:
raise KeyError()
else:
- return result
+ return cast(VT, result)
- def __setitem__(self, key, value):
+ def __setitem__(self, key: KT, value: VT) -> None:
self.set(key, value)
- def __delitem__(self, key, value):
+ def __delitem__(self, key: KT, value: VT) -> None:
result = self.pop(key, self.sentinel)
if result is self.sentinel:
raise KeyError()
- def __len__(self):
+ def __len__(self) -> int:
return self.len()
- def __contains__(self, key):
+ def __contains__(self, key: KT) -> bool:
return self.contains(key)
def set_cache_factor(self, factor: float) -> bool:
diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py
index ed7204336f..88ccf44337 100644
--- a/synapse/util/caches/response_cache.py
+++ b/synapse/util/caches/response_cache.py
@@ -104,8 +104,8 @@ class ResponseCache(Generic[KV]):
return None
def _set(
- self, context: ResponseCacheContext[KV], deferred: defer.Deferred
- ) -> defer.Deferred:
+ self, context: ResponseCacheContext[KV], deferred: "defer.Deferred[RV]"
+ ) -> "defer.Deferred[RV]":
"""Set the entry for the given key to the given deferred.
*deferred* should run its callbacks in the sentinel logcontext (ie,
@@ -126,7 +126,7 @@ class ResponseCache(Generic[KV]):
key = context.cache_key
self.pending_result_cache[key] = result
- def on_complete(r):
+ def on_complete(r: RV) -> RV:
# if this cache has a non-zero timeout, and the callback has not cleared
# the should_cache bit, we leave it in the cache for now and schedule
# its removal later.
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index 27b1da235e..330709b8b7 100644
--- a/synapse/util/caches/stream_change_cache.py
+++ b/synapse/util/caches/stream_change_cache.py
@@ -40,10 +40,10 @@ class StreamChangeCache:
self,
name: str,
current_stream_pos: int,
- max_size=10000,
+ max_size: int = 10000,
prefilled_cache: Optional[Mapping[EntityType, int]] = None,
- ):
- self._original_max_size = max_size
+ ) -> None:
+ self._original_max_size: int = max_size
self._max_size = math.floor(max_size)
self._entity_to_key: Dict[EntityType, int] = {}
diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py
index 46afe3f934..0b9ac26b69 100644
--- a/synapse/util/caches/ttlcache.py
+++ b/synapse/util/caches/ttlcache.py
@@ -159,12 +159,12 @@ class TTLCache(Generic[KT, VT]):
del self._expiry_list[0]
-@attr.s(frozen=True, slots=True)
-class _CacheEntry:
+@attr.s(frozen=True, slots=True, auto_attribs=True)
+class _CacheEntry: # Should be Generic[KT, VT]. See python-attrs/attrs#313
"""TTLCache entry"""
# expiry_time is the first attribute, so that entries are sorted by expiry.
- expiry_time = attr.ib(type=float)
- ttl = attr.ib(type=float)
- key = attr.ib()
- value = attr.ib()
+ expiry_time: float
+ ttl: float
+ key: Any # should be KT
+ value: Any # should be VT
diff --git a/synapse/util/daemonize.py b/synapse/util/daemonize.py
index f1a351cfd4..de04f34e4e 100644
--- a/synapse/util/daemonize.py
+++ b/synapse/util/daemonize.py
@@ -19,6 +19,8 @@ import logging
import os
import signal
import sys
+from types import FrameType, TracebackType
+from typing import NoReturn, Type
def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -> None:
@@ -97,7 +99,9 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -
# (we don't normally expect reactor.run to raise any exceptions, but this will
# also catch any other uncaught exceptions before we get that far.)
- def excepthook(type_, value, traceback):
+ def excepthook(
+ type_: Type[BaseException], value: BaseException, traceback: TracebackType
+ ) -> None:
logger.critical("Unhanded exception", exc_info=(type_, value, traceback))
sys.excepthook = excepthook
@@ -119,7 +123,7 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -
sys.exit(1)
# write a log line on SIGTERM.
- def sigterm(signum, frame):
+ def sigterm(signum: signal.Signals, frame: FrameType) -> NoReturn:
logger.warning("Caught signal %s. Stopping daemon." % signum)
sys.exit(0)
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index 1b82dca81b..1e784b3f1f 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -14,9 +14,11 @@
import logging
from functools import wraps
-from typing import Any, Callable, Optional, TypeVar, cast
+from types import TracebackType
+from typing import Any, Callable, Optional, Type, TypeVar, cast
from prometheus_client import Counter
+from typing_extensions import Protocol
from synapse.logging.context import (
ContextResourceUsage,
@@ -24,6 +26,7 @@ from synapse.logging.context import (
current_context,
)
from synapse.metrics import InFlightGauge
+from synapse.util import Clock
logger = logging.getLogger(__name__)
@@ -64,6 +67,10 @@ in_flight = InFlightGauge(
T = TypeVar("T", bound=Callable[..., Any])
+class HasClock(Protocol):
+ clock: Clock
+
+
def measure_func(name: Optional[str] = None) -> Callable[[T], T]:
"""
Used to decorate an async function with a `Measure` context manager.
@@ -86,7 +93,7 @@ def measure_func(name: Optional[str] = None) -> Callable[[T], T]:
block_name = func.__name__ if name is None else name
@wraps(func)
- async def measured_func(self, *args, **kwargs):
+ async def measured_func(self: HasClock, *args: Any, **kwargs: Any) -> Any:
with Measure(self.clock, block_name):
r = await func(self, *args, **kwargs)
return r
@@ -104,10 +111,10 @@ class Measure:
"start",
]
- def __init__(self, clock, name: str):
+ def __init__(self, clock: Clock, name: str) -> None:
"""
Args:
- clock: A n object with a "time()" method, which returns the current
+ clock: An object with a "time()" method, which returns the current
time in seconds.
name: The name of the metric to report.
"""
@@ -124,7 +131,7 @@ class Measure:
assert isinstance(curr_context, LoggingContext)
parent_context = curr_context
self._logging_context = LoggingContext(str(curr_context), parent_context)
- self.start: Optional[int] = None
+ self.start: Optional[float] = None
def __enter__(self) -> "Measure":
if self.start is not None:
@@ -138,7 +145,12 @@ class Measure:
return self
- def __exit__(self, exc_type, exc_val, exc_tb):
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
if self.start is None:
raise RuntimeError("Measure() block exited without being entered")
@@ -168,8 +180,9 @@ class Measure:
"""
return self._logging_context.get_resource_usage()
- def _update_in_flight(self, metrics):
+ def _update_in_flight(self, metrics) -> None:
"""Gets called when processing in flight metrics"""
+ assert self.start is not None
duration = self.clock.time() - self.start
metrics.real_time_max = max(metrics.real_time_max, duration)
diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py
index 9dd010af3b..1f18654d47 100644
--- a/synapse/util/patch_inline_callbacks.py
+++ b/synapse/util/patch_inline_callbacks.py
@@ -14,7 +14,7 @@
import functools
import sys
-from typing import Any, Callable, List
+from typing import Any, Callable, Generator, List, TypeVar
from twisted.internet import defer
from twisted.internet.defer import Deferred
@@ -24,6 +24,9 @@ from twisted.python.failure import Failure
_already_patched = False
+T = TypeVar("T")
+
+
def do_patch() -> None:
"""
Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit
@@ -37,15 +40,19 @@ def do_patch() -> None:
if _already_patched:
return
- def new_inline_callbacks(f):
+ def new_inline_callbacks(
+ f: Callable[..., Generator["Deferred[object]", object, T]]
+ ) -> Callable[..., "Deferred[T]"]:
@functools.wraps(f)
- def wrapped(*args, **kwargs):
+ def wrapped(*args: Any, **kwargs: Any) -> "Deferred[T]":
start_context = current_context()
changes: List[str] = []
- orig = orig_inline_callbacks(_check_yield_points(f, changes))
+ orig: Callable[..., "Deferred[T]"] = orig_inline_callbacks(
+ _check_yield_points(f, changes)
+ )
try:
- res = orig(*args, **kwargs)
+ res: "Deferred[T]" = orig(*args, **kwargs)
except Exception:
if current_context() != start_context:
for err in changes:
@@ -84,7 +91,7 @@ def do_patch() -> None:
print(err, file=sys.stderr)
raise Exception(err)
- def check_ctx(r):
+ def check_ctx(r: T) -> T:
if current_context() != start_context:
for err in changes:
print(err, file=sys.stderr)
@@ -107,7 +114,10 @@ def do_patch() -> None:
_already_patched = True
-def _check_yield_points(f: Callable, changes: List[str]) -> Callable:
+def _check_yield_points(
+ f: Callable[..., Generator["Deferred[object]", object, T]],
+ changes: List[str],
+) -> Callable:
"""Wraps a generator that is about to be passed to defer.inlineCallbacks
checking that after every yield the log contexts are correct.
@@ -127,7 +137,9 @@ def _check_yield_points(f: Callable, changes: List[str]) -> Callable:
from synapse.logging.context import current_context
@functools.wraps(f)
- def check_yield_points_inner(*args, **kwargs):
+ def check_yield_points_inner(
+ *args: Any, **kwargs: Any
+ ) -> Generator["Deferred[object]", object, T]:
gen = f(*args, **kwargs)
last_yield_line_no = gen.gi_frame.f_lineno
diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py
index 1c20b24bbe..899ee0adc8 100644
--- a/synapse/util/versionstring.py
+++ b/synapse/util/versionstring.py
@@ -15,14 +15,18 @@
import logging
import os
import subprocess
+from types import ModuleType
+from typing import Dict
logger = logging.getLogger(__name__)
+version_cache: Dict[ModuleType, str] = {}
-def get_version_string(module) -> str:
+
+def get_version_string(module: ModuleType) -> str:
"""Given a module calculate a git-aware version string for it.
- If called on a module not in a git checkout will return `__verison__`.
+ If called on a module not in a git checkout will return `__version__`.
Args:
module (module)
@@ -31,11 +35,13 @@ def get_version_string(module) -> str:
str
"""
- cached_version = getattr(module, "_synapse_version_string_cache", None)
- if cached_version:
+ cached_version = version_cache.get(module)
+ if cached_version is not None:
return cached_version
- version_string = module.__version__
+ # We want this to fail loudly with an AttributeError. Type-ignore this so
+ # mypy only considers the happy path.
+ version_string = module.__version__ # type: ignore[attr-defined]
try:
null = open(os.devnull, "w")
@@ -97,10 +103,15 @@ def get_version_string(module) -> str:
s for s in (git_branch, git_tag, git_commit, git_dirty) if s
)
- version_string = "%s (%s)" % (module.__version__, git_version)
+ version_string = "%s (%s)" % (
+ # If the __version__ attribute doesn't exist, we'll have failed
+ # loudly above.
+ module.__version__, # type: ignore[attr-defined]
+ git_version,
+ )
except Exception as e:
logger.info("Failed to check for git repository: %s", e)
- module._synapse_version_string_cache = version_string
+ version_cache[module] = version_string
return version_string
--
cgit 1.5.1
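The typing patch above leans on a `Protocol` so that `measure_func` can require only that the decorated method's `self` carries a `clock` attribute, without tying the decorator to any concrete class. A minimal, self-contained sketch of the same pattern follows; the `Clock` stub here is an assumption standing in for Synapse's real clock, not its actual implementation:

```python
import asyncio
import time
from functools import wraps
from typing import Any, Callable, TypeVar, cast

from typing_extensions import Protocol


class Clock:
    """Stub standing in for synapse.util.Clock (an assumption for this sketch)."""

    def time(self) -> float:
        return time.time()


class HasClock(Protocol):
    clock: Clock


T = TypeVar("T", bound=Callable[..., Any])


def measure_func(name: str) -> Callable[[T], T]:
    """Time an async method on any object that exposes a ``clock`` attribute."""

    def wrapper(func: T) -> T:
        @wraps(func)
        async def measured(self: HasClock, *args: Any, **kwargs: Any) -> Any:
            start = self.clock.time()
            try:
                return await func(self, *args, **kwargs)
            finally:
                print(f"{name} took {self.clock.time() - start:.3f}s")

        return cast(T, measured)

    return wrapper


class Worker:
    def __init__(self) -> None:
        self.clock = Clock()

    @measure_func("work")
    async def work(self) -> str:
        return "done"


asyncio.run(Worker().work())
```

The `Protocol` buys type safety without inheritance: any class with a `clock` attribute satisfies `HasClock` structurally, so mypy can check `self.clock` inside the decorator while the decorated classes remain unchanged.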
From c80878d22a013ed68d3929025bbd40074e66af01 Mon Sep 17 00:00:00 2001
From: Nick Barrett
Date: Wed, 6 Oct 2021 11:26:18 +0100
Subject: Add `--run-background-updates` option to `update_database` script.
(#10954)
Signed-off-by: Nick Barrett
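
The new script drives Twisted's reactor directly: it schedules an async task once the reactor starts and stops the reactor when the task completes. A minimal sketch of that pattern is below; the `print` is a stand-in for the real `store.db_pool.updates.run_background_updates(sleep=False)` call, not the script itself:

```python
from twisted.internet import defer, reactor


async def run_background_updates() -> None:
    # Stand-in for store.db_pool.updates.run_background_updates(sleep=False).
    print("running background updates...")
    # Stop the reactor to exit once every background update has run.
    reactor.stop()


def run() -> None:
    # ensureDeferred bridges the coroutine into Twisted's Deferred machinery.
    defer.ensureDeferred(run_background_updates())


reactor.callWhenRunning(run)
reactor.run()
```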
---
.ci/scripts/test_synapse_port_db.sh | 4 +-
changelog.d/10954.feature | 1 +
debian/changelog | 7 +++
debian/matrix-synapse-py3.links | 1 +
scripts-dev/lint.sh | 2 +-
scripts-dev/make_full_schema.sh | 2 +-
scripts-dev/update_database | 100 -------------------------------
scripts/update_synapse_database | 116 ++++++++++++++++++++++++++++++++++++
tox.ini | 2 +-
9 files changed, 130 insertions(+), 105 deletions(-)
create mode 100644 changelog.d/10954.feature
delete mode 100755 scripts-dev/update_database
create mode 100755 scripts/update_synapse_database
diff --git a/.ci/scripts/test_synapse_port_db.sh b/.ci/scripts/test_synapse_port_db.sh
index 2b4e5ec170..50115b3079 100755
--- a/.ci/scripts/test_synapse_port_db.sh
+++ b/.ci/scripts/test_synapse_port_db.sh
@@ -25,7 +25,7 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
-scripts-dev/update_database --database-config .ci/sqlite-config.yaml
+scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
@@ -46,7 +46,7 @@ echo "--- Prepare empty SQLite database"
# we do this by deleting the sqlite db, and then doing the same again.
rm .ci/test_db.db
-scripts-dev/update_database --database-config .ci/sqlite-config.yaml
+scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
.ci/scripts/postgres_exec.py \
diff --git a/changelog.d/10954.feature b/changelog.d/10954.feature
new file mode 100644
index 0000000000..94dfa7175c
--- /dev/null
+++ b/changelog.d/10954.feature
@@ -0,0 +1 @@
+Include an `update_synapse_database` script in the distribution. Contributed by @Fizzadar at Beeper.
diff --git a/debian/changelog b/debian/changelog
index 9e878fbc2d..8e80c78ee7 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+matrix-synapse-py3 (1.44.0~rc2+nmu1) UNRELEASED; urgency=medium
+
+ [ Nick @ Beeper ]
+ * Include an `update_synapse_database` script in the distribution.
+
+ -- root Mon, 04 Oct 2021 13:29:26 +0000
+
matrix-synapse-py3 (1.44.0) stable; urgency=medium
* New synapse release 1.44.0.
diff --git a/debian/matrix-synapse-py3.links b/debian/matrix-synapse-py3.links
index 53e2965418..7eeba180d9 100644
--- a/debian/matrix-synapse-py3.links
+++ b/debian/matrix-synapse-py3.links
@@ -3,3 +3,4 @@ opt/venvs/matrix-synapse/bin/register_new_matrix_user usr/bin/register_new_matri
opt/venvs/matrix-synapse/bin/synapse_port_db usr/bin/synapse_port_db
opt/venvs/matrix-synapse/bin/synapse_review_recent_signups usr/bin/synapse_review_recent_signups
opt/venvs/matrix-synapse/bin/synctl usr/bin/synctl
+opt/venvs/matrix-synapse/bin/update_synapse_database usr/bin/update_synapse_database
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index 809eff166a..b6554a73c1 100755
--- a/scripts-dev/lint.sh
+++ b/scripts-dev/lint.sh
@@ -90,10 +90,10 @@ else
"scripts/hash_password"
"scripts/register_new_matrix_user"
"scripts/synapse_port_db"
+ "scripts/update_synapse_database"
"scripts-dev"
"scripts-dev/build_debian_packages"
"scripts-dev/sign_json"
- "scripts-dev/update_database"
"contrib" "synctl" "setup.py" "synmark" "stubs" ".ci"
)
fi
diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh
index 39bf30d258..c3c90f4ec6 100755
--- a/scripts-dev/make_full_schema.sh
+++ b/scripts-dev/make_full_schema.sh
@@ -147,7 +147,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
echo "Running db background jobs..."
-scripts-dev/update_database --database-config "$SQLITE_CONFIG"
+scripts/update_synapse_database --database-config "$SQLITE_CONFIG" --run-background-updates
# Create the PostgreSQL database.
echo "Creating postgres database..."
diff --git a/scripts-dev/update_database b/scripts-dev/update_database
deleted file mode 100755
index 87f709b6ed..0000000000
--- a/scripts-dev/update_database
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2019 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import argparse
-import logging
-import sys
-
-import yaml
-
-from twisted.internet import defer, reactor
-
-import synapse
-from synapse.config.homeserver import HomeServerConfig
-from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.server import HomeServer
-from synapse.storage import DataStore
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("update_database")
-
-
-class MockHomeserver(HomeServer):
- DATASTORE_CLASS = DataStore
-
- def __init__(self, config, **kwargs):
- super(MockHomeserver, self).__init__(
- config.server_name, reactor=reactor, config=config, **kwargs
- )
-
- self.version_string = "Synapse/" + get_version_string(synapse)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(
- description=(
- "Updates a synapse database to the latest schema and runs background updates"
- " on it."
- )
- )
- parser.add_argument("-v", action="store_true")
- parser.add_argument(
- "--database-config",
- type=argparse.FileType("r"),
- required=True,
- help="A database config file for either a SQLite3 database or a PostgreSQL one.",
- )
-
- args = parser.parse_args()
-
- logging_config = {
- "level": logging.DEBUG if args.v else logging.INFO,
- "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
- }
-
- logging.basicConfig(**logging_config)
-
- # Load, process and sanity-check the config.
- hs_config = yaml.safe_load(args.database_config)
-
- if "database" not in hs_config:
- sys.stderr.write("The configuration file must have a 'database' section.\n")
- sys.exit(4)
-
- config = HomeServerConfig()
- config.parse_config_dict(hs_config, "", "")
-
- # Instantiate and initialise the homeserver object.
- hs = MockHomeserver(config)
-
- # Setup instantiates the store within the homeserver object and updates the
- # DB.
- hs.setup()
- store = hs.get_datastore()
-
- async def run_background_updates():
- await store.db_pool.updates.run_background_updates(sleep=False)
- # Stop the reactor to exit the script once every background update is run.
- reactor.stop()
-
- def run():
- # Apply all background updates on the database.
- defer.ensureDeferred(
- run_as_background_process("background_updates", run_background_updates)
- )
-
- reactor.callWhenRunning(run)
-
- reactor.run()
diff --git a/scripts/update_synapse_database b/scripts/update_synapse_database
new file mode 100755
index 0000000000..26b29b0b45
--- /dev/null
+++ b/scripts/update_synapse_database
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import logging
+import sys
+
+import yaml
+
+from twisted.internet import defer, reactor
+
+import synapse
+from synapse.config.homeserver import HomeServerConfig
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.server import HomeServer
+from synapse.storage import DataStore
+from synapse.util.versionstring import get_version_string
+
+logger = logging.getLogger("update_database")
+
+
+class MockHomeserver(HomeServer):
+ DATASTORE_CLASS = DataStore
+
+ def __init__(self, config, **kwargs):
+ super(MockHomeserver, self).__init__(
+ config.server_name, reactor=reactor, config=config, **kwargs
+ )
+
+ self.version_string = "Synapse/" + get_version_string(synapse)
+
+
+def run_background_updates(hs):
+ store = hs.get_datastore()
+
+ async def run_background_updates():
+ await store.db_pool.updates.run_background_updates(sleep=False)
+ # Stop the reactor to exit the script once every background update is run.
+ reactor.stop()
+
+ def run():
+ # Apply all background updates on the database.
+ defer.ensureDeferred(
+ run_as_background_process("background_updates", run_background_updates)
+ )
+
+ reactor.callWhenRunning(run)
+
+ reactor.run()
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description=(
+ "Updates a synapse database to the latest schema and optionally runs background updates"
+ " on it."
+ )
+ )
+ parser.add_argument("-v", action="store_true")
+ parser.add_argument(
+ "--database-config",
+ type=argparse.FileType("r"),
+ required=True,
+ help="Synapse configuration file, giving the details of the database to be updated",
+ )
+ parser.add_argument(
+ "--run-background-updates",
+ action="store_true",
+ required=False,
+ help="run background updates after upgrading the database schema",
+ )
+
+ args = parser.parse_args()
+
+ logging_config = {
+ "level": logging.DEBUG if args.v else logging.INFO,
+ "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
+ }
+
+ logging.basicConfig(**logging_config)
+
+ # Load, process and sanity-check the config.
+ hs_config = yaml.safe_load(args.database_config)
+
+ if "database" not in hs_config:
+ sys.stderr.write("The configuration file must have a 'database' section.\n")
+ sys.exit(4)
+
+ config = HomeServerConfig()
+ config.parse_config_dict(hs_config, "", "")
+
+ # Instantiate and initialise the homeserver object.
+ hs = MockHomeserver(config)
+
+ # Setup instantiates the store within the homeserver object and updates the
+ # DB.
+ hs.setup()
+
+ if args.run_background_updates:
+ run_background_updates(hs)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tox.ini b/tox.ini
index 5a62ec76c2..cfe6a06942 100644
--- a/tox.ini
+++ b/tox.ini
@@ -41,10 +41,10 @@ lint_targets =
scripts/hash_password
scripts/register_new_matrix_user
scripts/synapse_port_db
+ scripts/update_synapse_database
scripts-dev
scripts-dev/build_debian_packages
scripts-dev/sign_json
- scripts-dev/update_database
stubs
contrib
synctl
--
cgit 1.5.1
From 38b7db58859d80f06b8dc94e6a6dd19600778caa Mon Sep 17 00:00:00 2001
From: Max Kratz
Date: Wed, 6 Oct 2021 13:20:41 +0200
Subject: Update the development doc on the samling environment for SAML testing. (#10973)
---
changelog.d/10973.doc | 1 +
docs/development/saml.md | 11 +++++------
2 files changed, 6 insertions(+), 6 deletions(-)
create mode 100644 changelog.d/10973.doc
diff --git a/changelog.d/10973.doc b/changelog.d/10973.doc
new file mode 100644
index 0000000000..d7429a9da6
--- /dev/null
+++ b/changelog.d/10973.doc
@@ -0,0 +1 @@
+Fix a dead URL in development documentation (SAML) and change wording from "Riot" to "Element". Contributed by @maxkratz.
diff --git a/docs/development/saml.md b/docs/development/saml.md
index a9bfd2dc05..60a431d686 100644
--- a/docs/development/saml.md
+++ b/docs/development/saml.md
@@ -1,10 +1,9 @@
# How to test SAML as a developer without a server
-https://capriza.github.io/samling/samling.html (https://github.com/capriza/samling) is a great
-resource for being able to tinker with the SAML options within Synapse without needing to
-deploy and configure a complicated software stack.
+https://fujifish.github.io/samling/samling.html (https://github.com/fujifish/samling) is a great resource for being able to tinker with the
+SAML options within Synapse without needing to deploy and configure a complicated software stack.
-To make Synapse (and therefore Riot) use it:
+To make Synapse (and therefore Element) use it:
1. Use the samling.html URL above or deploy your own and visit the IdP Metadata tab.
2. Copy the XML to your clipboard.
@@ -26,9 +25,9 @@ To make Synapse (and therefore Riot) use it:
the dependencies are installed and ready to go.
7. Restart Synapse.
-Then in Riot:
+Then in Element:
-1. Visit the login page with a Riot pointing at your homeserver.
+1. Visit the login page and point Element towards your homeserver using the `public_baseurl` above.
2. Click the Single Sign-On button.
3. On the samling page, enter a Name Identifier and add a SAML Attribute for `uid=your_localpart`.
The response must also be signed.
--
cgit 1.5.1
From 370bca32e60a854ab063f1abedb087dacae37e5a Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Wed, 6 Oct 2021 13:56:45 +0100
Subject: Don't drop user dir deltas when server leaves room (#10982)
Fix a long-standing bug where a batch of user directory changes would be
silently dropped if the server left a room early in the batch.
* Pull out `wait_for_background_updates` in tests
Co-authored-by: Patrick Cloke
Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
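
The fix is a one-word change from `return` to `continue` in the loop over room deltas. A toy illustration (hypothetical room names, not Synapse's real handler) of why `return` silently drops the remainder of the batch:

```python
def process_deltas_buggy(rooms: list) -> list:
    handled = []
    for room in rooms:
        if room.startswith("left:"):  # server has left this room
            return handled  # BUG: the rest of the batch is silently dropped
        handled.append(room)
    return handled


def process_deltas_fixed(rooms: list) -> list:
    handled = []
    for room in rooms:
        if room.startswith("left:"):
            continue  # skip only this room; keep processing the batch
        handled.append(room)
    return handled


batch = ["left:!a", "!b", "!c"]
assert process_deltas_buggy(batch) == []          # !b and !c never processed
assert process_deltas_fixed(batch) == ["!b", "!c"]
```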
---
changelog.d/10982.bugfix | 1 +
synapse/handlers/user_directory.py | 2 +-
tests/handlers/test_stats.py | 21 +++--------------
tests/handlers/test_user_directory.py | 39 +++++++++++++++++++++++++++++++
tests/storage/databases/main/test_room.py | 7 +-----
tests/storage/test_cleanup_extrems.py | 7 +-----
tests/storage/test_client_ips.py | 21 +++--------------
tests/storage/test_event_chain.py | 14 ++---------
tests/storage/test_roommember.py | 14 ++---------
tests/storage/test_user_directory.py | 7 +-----
tests/unittest.py | 9 +++++++
11 files changed, 63 insertions(+), 79 deletions(-)
create mode 100644 changelog.d/10982.bugfix
diff --git a/changelog.d/10982.bugfix b/changelog.d/10982.bugfix
new file mode 100644
index 0000000000..5c9e15eeaa
--- /dev/null
+++ b/changelog.d/10982.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where the remainder of a batch of user directory changes would be silently dropped if the server left a room early in the batch.
\ No newline at end of file
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 18d8c8744e..97f60b5806 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -220,7 +220,7 @@ class UserDirectoryHandler(StateDeltasHandler):
for user_id in user_ids:
await self._handle_remove_user(room_id, user_id)
- return
+ continue
else:
logger.debug("Server is still in room: %r", room_id)
diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
index 24b7ef6efc..56207f4db6 100644
--- a/tests/handlers/test_stats.py
+++ b/tests/handlers/test_stats.py
@@ -103,12 +103,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
# Do the initial population of the stats via the background update
self._add_background_updates()
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
def test_initial_room(self):
"""
@@ -140,12 +135,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
# Do the initial population of the user directory via the background update
self._add_background_updates()
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
r = self.get_success(self.get_all_room_state())
@@ -568,12 +558,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
)
)
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
r1stats_complete = self._get_current_stats("room", r1)
u1stats_complete = self._get_current_stats("user", u1)
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index b3c3af113b..03fd5a3e2c 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -363,6 +363,45 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.assertEqual(len(s["results"]), 1)
self.assertEqual(s["results"][0]["user_id"], user)
+ def test_process_join_after_server_leaves_room(self) -> None:
+ alice = self.register_user("alice", "pass")
+ alice_token = self.login(alice, "pass")
+ bob = self.register_user("bob", "pass")
+ bob_token = self.login(bob, "pass")
+
+ # Alice makes two rooms. Bob joins one of them.
+ room1 = self.helper.create_room_as(alice, tok=alice_token)
+ room2 = self.helper.create_room_as(alice, tok=alice_token)
+ print("room1=", room1)
+ print("room2=", room2)
+ self.helper.join(room1, bob, tok=bob_token)
+
+ # The user sharing tables should have been updated.
+ public1 = self.get_success(self.user_dir_helper.get_users_in_public_rooms())
+ self.assertEqual(set(public1), {(alice, room1), (alice, room2), (bob, room1)})
+
+ # Alice leaves room1. The user sharing tables should be updated.
+ self.helper.leave(room1, alice, tok=alice_token)
+ public2 = self.get_success(self.user_dir_helper.get_users_in_public_rooms())
+ self.assertEqual(set(public2), {(alice, room2), (bob, room1)})
+
+ # Pause the processing of new events.
+ dir_handler = self.hs.get_user_directory_handler()
+ dir_handler.update_user_directory = False
+
+ # Bob leaves one room and joins the other.
+ self.helper.leave(room1, bob, tok=bob_token)
+ self.helper.join(room2, bob, tok=bob_token)
+
+ # Process the leave and join in one go.
+ dir_handler.update_user_directory = True
+ dir_handler.notify_new_event()
+ self.wait_for_background_updates()
+
+ # The user sharing tables should have been updated.
+ public3 = self.get_success(self.user_dir_helper.get_users_in_public_rooms())
+ self.assertEqual(set(public3), {(alice, room2), (bob, room2)})
+
def test_private_room(self) -> None:
"""
A user can be searched for only by people that are either in a public
diff --git a/tests/storage/databases/main/test_room.py b/tests/storage/databases/main/test_room.py
index ffee707153..7496974da3 100644
--- a/tests/storage/databases/main/test_room.py
+++ b/tests/storage/databases/main/test_room.py
@@ -79,12 +79,7 @@ class RoomBackgroundUpdateStoreTestCase(HomeserverTestCase):
self.store.db_pool.updates._all_done = False
# Now let's actually drive the updates to completion
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
# Make sure the background update filled in the room creator
room_creator_after = self.get_success(
diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py
index 7cc5e621ba..a59c28f896 100644
--- a/tests/storage/test_cleanup_extrems.py
+++ b/tests/storage/test_cleanup_extrems.py
@@ -66,12 +66,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
# Ugh, have to reset this flag
self.store.db_pool.updates._all_done = False
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
def test_soft_failed_extremities_handled_correctly(self):
"""Test that extremities are correctly calculated in the presence of
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index 3cc8038f1e..dada4f98c9 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -242,12 +242,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
def test_devices_last_seen_bg_update(self):
# First make sure we have completed all updates.
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
user_id = "@user:id"
device_id = "MY_DEVICE"
@@ -311,12 +306,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
self.store.db_pool.updates._all_done = False
# Now let's actually drive the updates to completion
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
# We should now get the correct result again
result = self.get_success(
@@ -337,12 +327,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
def test_old_user_ips_pruned(self):
# First make sure we have completed all updates.
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
user_id = "@user:id"
device_id = "MY_DEVICE"
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index 93136f0717..b31c5eb5ec 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -578,12 +578,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
# Ugh, have to reset this flag
self.store.db_pool.updates._all_done = False
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
# Test that the `has_auth_chain_index` has been set
self.assertTrue(self.get_success(self.store.has_auth_chain_index(room_id)))
@@ -619,12 +614,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
# Ugh, have to reset this flag
self.store.db_pool.updates._all_done = False
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
# Test that the `has_auth_chain_index` has been set
self.assertTrue(self.get_success(self.store.has_auth_chain_index(room_id1)))
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index c72dc40510..2873e22ccf 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -169,12 +169,7 @@ class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase):
def test_can_rerun_update(self):
# First make sure we have completed all updates.
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
# Now let's create a room, which will insert a membership
user = UserID("alice", "test")
@@ -197,9 +192,4 @@ class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase):
self.store.db_pool.updates._all_done = False
# Now let's actually drive the updates to completion
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index fddfb8db28..9f483ad681 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -212,12 +212,7 @@ class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
)
)
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
+ self.wait_for_background_updates()
def test_initial(self) -> None:
"""
diff --git a/tests/unittest.py b/tests/unittest.py
index ae393ee53e..81c1a9e9d2 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -317,6 +317,15 @@ class HomeserverTestCase(TestCase):
self.reactor.advance(0.01)
time.sleep(0.01)
+ def wait_for_background_updates(self) -> None:
+ """Block until all background database updates have completed."""
+ while not self.get_success(
+ self.store.db_pool.updates.has_completed_background_updates()
+ ):
+ self.get_success(
+ self.store.db_pool.updates.do_next_background_update(100), by=0.1
+ )
+
def make_homeserver(self, reactor, clock):
"""
Make and return a homeserver.
--
cgit 1.5.1
From b0460936c8e31a2e0d160d4bba69223036ae26fe Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 6 Oct 2021 16:03:17 +0200
Subject: Add the synapse-core team as code owners (#10994)
Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com>
---
.github/CODEOWNERS | 2 ++
changelog.d/10994.misc | 1 +
2 files changed, 3 insertions(+)
create mode 100644 .github/CODEOWNERS
create mode 100644 changelog.d/10994.misc
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000000..d6cd75f1d0
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,2 @@
+# Automatically request reviews from the synapse-core team when a pull request comes in.
+* @matrix-org/synapse-core
\ No newline at end of file
diff --git a/changelog.d/10994.misc b/changelog.d/10994.misc
new file mode 100644
index 0000000000..0a8538b01e
--- /dev/null
+++ b/changelog.d/10994.misc
@@ -0,0 +1 @@
+Add a `CODEOWNERS` file to automatically request reviews from the `@matrix-org/synapse-core` team on new pull requests.
--
cgit 1.5.1
From 829f2a82b042d944fef3df55faec924502cdf20d Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 6 Oct 2021 16:32:16 +0200
Subject: Add a spam checker callback to allow or deny room joins (#10910)
Co-authored-by: Erik Johnston
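
A hedged sketch of a third-party module using the new callback. It assumes the standard module API entry point and that `ModuleApi.register_spam_checker_callbacks` forwards the new `user_may_join_room` keyword to the `SpamChecker.register_callbacks` method this patch extends:

```python
from synapse.module_api import ModuleApi


class BlockUninvitedJoins:
    """Toy example module: only allow joins that follow an invite."""

    def __init__(self, config: dict, api: ModuleApi):
        api.register_spam_checker_callbacks(
            user_may_join_room=self.user_may_join_room,
        )

    async def user_may_join_room(
        self, user: str, room: str, is_invited: bool
    ) -> bool:
        # Returning False rejects the join; returning True defers to any
        # other registered callbacks.
        return is_invited
```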
---
changelog.d/10910.feature | 1 +
docs/modules/spam_checker_callbacks.md | 15 +++++
synapse/events/spamcheck.py | 24 ++++++++
synapse/handlers/room.py | 2 +
synapse/handlers/room_member.py | 31 ++++++++++
tests/rest/client/test_rooms.py | 101 +++++++++++++++++++++++++++++++++
6 files changed, 174 insertions(+)
create mode 100644 changelog.d/10910.feature
diff --git a/changelog.d/10910.feature b/changelog.d/10910.feature
new file mode 100644
index 0000000000..aee139f8b6
--- /dev/null
+++ b/changelog.d/10910.feature
@@ -0,0 +1 @@
+Add a spam checker callback to allow or deny room joins.
diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
index 7920ac5f8f..92376df993 100644
--- a/docs/modules/spam_checker_callbacks.md
+++ b/docs/modules/spam_checker_callbacks.md
@@ -19,6 +19,21 @@ either a `bool` to indicate whether the event must be rejected because of spam,
to indicate the event must be rejected because of spam and to give a rejection reason to
forward to clients.
+### `user_may_join_room`
+
+```python
+async def user_may_join_room(user: str, room: str, is_invited: bool) -> bool
+```
+
+Called when a user is trying to join a room. The module must return a `bool` to indicate
+whether the user can join the room. The user is represented by their Matrix user ID (e.g.
+`@alice:example.com`) and the room is represented by its Matrix ID (e.g.
+`!room:example.com`). The module is also given a boolean to indicate whether the user
+currently has a pending invite in the room.
+
+This callback isn't called if the join is performed by a server administrator, or in the
+context of a room creation.
+
### `user_may_invite`
```python
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index c389f70b8d..ec8863e397 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -44,6 +44,7 @@ CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
["synapse.events.EventBase"],
Awaitable[Union[bool, str]],
]
+USER_MAY_JOIN_ROOM_CALLBACK = Callable[[str, str, bool], Awaitable[bool]]
USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]]
USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]]
USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK = Callable[
@@ -165,6 +166,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
class SpamChecker:
def __init__(self):
self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
+ self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = []
self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = []
self._user_may_create_room_with_invites_callbacks: List[
@@ -187,6 +189,7 @@ class SpamChecker:
def register_callbacks(
self,
check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
+ user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None,
user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None,
user_may_create_room_with_invites: Optional[
@@ -206,6 +209,9 @@ class SpamChecker:
if check_event_for_spam is not None:
self._check_event_for_spam_callbacks.append(check_event_for_spam)
+ if user_may_join_room is not None:
+ self._user_may_join_room_callbacks.append(user_may_join_room)
+
if user_may_invite is not None:
self._user_may_invite_callbacks.append(user_may_invite)
@@ -259,6 +265,24 @@ class SpamChecker:
return False
+ async def user_may_join_room(self, user_id: str, room_id: str, is_invited: bool) -> bool:
+ """Checks if a given user is allowed to join a room.
+ Not called when a user creates a room.
+
+ Args:
+ user_id: The ID of the user wanting to join the room
+ room_id: The ID of the room the user wants to join
+ is_invited: Whether the user is invited into the room
+
+ Returns:
+ bool: Whether the user may join the room
+ """
+ for callback in self._user_may_join_room_callbacks:
+ if await callback(user_id, room_id, is_invited) is False:
+ return False
+
+ return True
+
async def user_may_invite(
self, inviter_userid: str, invitee_userid: str, room_id: str
) -> bool:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 873e08258e..d40dbd761d 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -860,6 +860,7 @@ class RoomCreationHandler(BaseHandler):
"invite",
ratelimit=False,
content=content,
+ new_room=True,
)
for invite_3pid in invite_3pid_list:
@@ -962,6 +963,7 @@ class RoomCreationHandler(BaseHandler):
"join",
ratelimit=ratelimit,
content=creator_join_profile,
+ new_room=True,
)
# We treat the power levels override specially as this needs to be one
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index c8fb24a20c..0b79dbcf8d 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -434,6 +434,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
third_party_signed: Optional[dict] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
+ new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
prev_event_ids: Optional[List[str]] = None,
@@ -451,6 +452,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
third_party_signed: Information from a 3PID invite.
ratelimit: Whether to rate limit the request.
content: The content of the created event.
+ new_room: Whether the membership update is happening in the context of a room
+ creation.
require_consent: Whether consent is required.
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
@@ -485,6 +488,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
+ new_room=new_room,
require_consent=require_consent,
outlier=outlier,
prev_event_ids=prev_event_ids,
@@ -504,6 +508,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
third_party_signed: Optional[dict] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
+ new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
prev_event_ids: Optional[List[str]] = None,
@@ -523,6 +528,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
third_party_signed:
ratelimit:
content:
+ new_room: Whether the membership update is happening in the context of a room
+ creation.
require_consent:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
@@ -726,6 +733,30 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
+ # Figure out whether the user is a server admin to determine whether they
+ # should be able to bypass the spam checker.
+ if (
+ self._server_notices_mxid is not None
+ and requester.user.to_string() == self._server_notices_mxid
+ ):
+ # allow the server notices mxid to join rooms
+ bypass_spam_checker = True
+
+ else:
+ bypass_spam_checker = await self.auth.is_server_admin(requester.user)
+
+ inviter = await self._get_inviter(target.to_string(), room_id)
+ if (
+ not bypass_spam_checker
+ # We assume that if the spam checker allowed the user to create
+ # a room then they're allowed to join it.
+ and not new_room
+ and not await self.spam_checker.user_may_join_room(
+ target.to_string(), room_id, is_invited=inviter is not None
+ )
+ ):
+ raise SynapseError(403, "Not allowed to join this room")
+
# Check if a remote join should be performed.
remote_join, remote_room_hosts = await self._should_perform_remote_join(
target.to_string(), room_id, remote_room_hosts, content, is_host_in_room
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index 30bdaa9c27..a41ec6a98f 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -784,6 +784,30 @@ class RoomsCreateTestCase(RoomBase):
# Check that do_3pid_invite wasn't called this time.
self.assertEquals(do_3pid_invite_mock.call_count, len(invited_3pids))
+ def test_spam_checker_may_join_room(self):
+ """Tests that the user_may_join_room spam checker callback is correctly bypassed
+ when creating a new room.
+ """
+
+ async def user_may_join_room(
+ mxid: str,
+ room_id: str,
+ is_invite: bool,
+ ) -> bool:
+ return False
+
+ join_mock = Mock(side_effect=user_may_join_room)
+ self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock)
+
+ channel = self.make_request(
+ "POST",
+ "/createRoom",
+ {},
+ )
+ self.assertEquals(channel.code, 200, channel.json_body)
+
+ self.assertEquals(join_mock.call_count, 0)
+
class RoomTopicTestCase(RoomBase):
"""Tests /rooms/$room_id/topic REST events."""
@@ -975,6 +999,83 @@ class RoomInviteRatelimitTestCase(RoomBase):
self.helper.invite(room_id, self.user_id, "@other-users:red", expect_code=429)
+class RoomJoinTestCase(RoomBase):
+
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def prepare(self, reactor, clock, homeserver):
+ self.user1 = self.register_user("thomas", "hackme")
+ self.tok1 = self.login("thomas", "hackme")
+
+ self.user2 = self.register_user("teresa", "hackme")
+ self.tok2 = self.login("teresa", "hackme")
+
+ self.room1 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1)
+ self.room2 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1)
+ self.room3 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1)
+
+ def test_spam_checker_may_join_room(self):
+ """Tests that the user_may_join_room spam checker callback is correctly called
+ and blocks room joins when needed.
+ """
+
+ # Register a dummy callback. Make it allow all room joins for now.
+ return_value = True
+
+ async def user_may_join_room(
+ userid: str,
+ room_id: str,
+ is_invited: bool,
+ ) -> bool:
+ return return_value
+
+ callback_mock = Mock(side_effect=user_may_join_room)
+ self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock)
+
+ # Join a first room, without being invited to it.
+ self.helper.join(self.room1, self.user2, tok=self.tok2)
+
+ # Check that the callback was called with the right arguments.
+ expected_call_args = (
+ (
+ self.user2,
+ self.room1,
+ False,
+ ),
+ )
+ self.assertEquals(
+ callback_mock.call_args,
+ expected_call_args,
+ callback_mock.call_args,
+ )
+
+ # Join a second room, this time with an invite for it.
+ self.helper.invite(self.room2, self.user1, self.user2, tok=self.tok1)
+ self.helper.join(self.room2, self.user2, tok=self.tok2)
+
+ # Check that the callback was called with the right arguments.
+ expected_call_args = (
+ (
+ self.user2,
+ self.room2,
+ True,
+ ),
+ )
+ self.assertEquals(
+ callback_mock.call_args,
+ expected_call_args,
+ callback_mock.call_args,
+ )
+
+ # Now make the callback deny all room joins, and check that a join actually fails.
+ return_value = False
+ self.helper.join(self.room3, self.user2, expect_code=403, tok=self.tok2)
+
+
class RoomJoinRatelimitTestCase(RoomBase):
user_id = "@sid1:red"
--
cgit 1.5.1
From f4b1a9a527273ef71b2f7d970642b7af45462e0f Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 6 Oct 2021 10:47:41 -0400
Subject: Require direct references to configuration variables. (#10985)
This removes the magic that allowed configuration variables to be
accessed directly from the config object. A specific configuration
class must now be used (e.g. `config.foo` must be replaced with
`config.server.foo`).
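
A toy sketch of the new lookup model (stub classes, not Synapse's real config): each section becomes an explicit attribute set at construction time, so unqualified lookups fail loudly instead of being resolved by scanning every section:

```python
class ServerConfig:
    section = "server"

    def __init__(self) -> None:
        self.server_name = "example.com"


class RootConfig:
    config_classes = [ServerConfig]

    def __init__(self) -> None:
        # Each section becomes an explicit attribute; nothing is resolved
        # by a __getattr__ fallback behind the scenes.
        for config_class in self.config_classes:
            setattr(self, config_class.section, config_class())


config = RootConfig()
print(config.server.server_name)  # "example.com"
# `config.server_name` now raises AttributeError rather than being found
# by searching all sections, which is what the removed magic did.
```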
---
changelog.d/10985.misc | 1 +
scripts/synapse_port_db | 4 +-
scripts/update_synapse_database | 2 +-
synapse/app/_base.py | 2 +-
synapse/app/admin_cmd.py | 4 +-
synapse/app/homeserver.py | 2 +-
synapse/config/_base.py | 64 ++++----------------------
synapse/config/account_validity.py | 2 +-
synapse/config/cas.py | 2 +-
synapse/config/emailconfig.py | 9 ++--
synapse/config/key.py | 6 ++-
synapse/config/oidc.py | 2 +-
synapse/config/registration.py | 7 ++-
synapse/config/repository.py | 2 +-
synapse/config/saml2.py | 2 +-
synapse/config/server_notices.py | 4 +-
synapse/config/sso.py | 6 ++-
synapse/handlers/account_validity.py | 8 +---
synapse/handlers/room_member.py | 7 ++-
synapse/replication/tcp/client.py | 2 +-
synapse/replication/tcp/handler.py | 7 ++-
synapse/rest/client/auth.py | 2 +-
synapse/rest/client/push_rule.py | 4 +-
synapse/storage/databases/main/push_rule.py | 4 +-
synapse/storage/databases/main/registration.py | 4 +-
tests/config/test_base.py | 21 +++++----
tests/config/test_cache.py | 50 ++++++++------------
tests/config/test_load.py | 12 +++--
tests/config/test_tls.py | 38 +++++++--------
tests/storage/test_appservice.py | 2 +-
tests/storage/test_txn_limit.py | 2 +-
31 files changed, 124 insertions(+), 160 deletions(-)
create mode 100644 changelog.d/10985.misc
diff --git a/changelog.d/10985.misc b/changelog.d/10985.misc
new file mode 100644
index 0000000000..586a0b3a96
--- /dev/null
+++ b/changelog.d/10985.misc
@@ -0,0 +1 @@
+Use direct references to config flags.
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index fa6ac6d93a..a947d9e49e 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -215,7 +215,7 @@ class MockHomeserver:
def __init__(self, config):
self.clock = Clock(reactor)
self.config = config
- self.hostname = config.server_name
+ self.hostname = config.server.server_name
self.version_string = "Synapse/" + get_version_string(synapse)
def get_clock(self):
@@ -583,7 +583,7 @@ class Porter(object):
return
self.postgres_store = self.build_db_store(
- self.hs_config.get_single_database()
+ self.hs_config.database.get_single_database()
)
await self.run_background_updates_on_postgres()
diff --git a/scripts/update_synapse_database b/scripts/update_synapse_database
index 26b29b0b45..6c088bad93 100755
--- a/scripts/update_synapse_database
+++ b/scripts/update_synapse_database
@@ -36,7 +36,7 @@ class MockHomeserver(HomeServer):
def __init__(self, config, **kwargs):
super(MockHomeserver, self).__init__(
- config.server_name, reactor=reactor, config=config, **kwargs
+ config.server.server_name, reactor=reactor, config=config, **kwargs
)
self.version_string = "Synapse/" + get_version_string(synapse)
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 749bc1deb9..4a204a5823 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -301,7 +301,7 @@ def refresh_certificate(hs):
if not hs.config.server.has_tls_listener():
return
- hs.config.read_certificate_from_disk()
+ hs.config.tls.read_certificate_from_disk()
hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)
if hs._listening_services:
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 556bcc124e..13d20af457 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -197,9 +197,9 @@ def start(config_options):
# Explicitly disable background processes
config.server.update_user_directory = False
config.worker.run_background_tasks = False
- config.start_pushers = False
+ config.worker.start_pushers = False
config.pusher_shard_config.instances = []
- config.send_federation = False
+ config.worker.send_federation = False
config.federation_shard_config.instances = []
synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 2b2d4bbf83..422f03cc04 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -234,7 +234,7 @@ class SynapseHomeServer(HomeServer):
)
if name in ["media", "federation", "client"]:
- if self.config.media.enable_media_repo:
+ if self.config.server.enable_media_repo:
media_repo = self.get_media_repository_resource()
resources.update(
{MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo}
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 26152b0924..7c4428a138 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -118,21 +118,6 @@ class Config:
"synapse", "res/templates"
)
- def __getattr__(self, item: str) -> Any:
- """
- Try and fetch a configuration option that does not exist on this class.
-
- This is so that existing configs that rely on `self.value`, where value
- is actually from a different config section, continue to work.
- """
- if item in ["generate_config_section", "read_config"]:
- raise AttributeError(item)
-
- if self.root is None:
- raise AttributeError(item)
- else:
- return self.root._get_unclassed_config(self.section, item)
-
@staticmethod
def parse_size(value):
if isinstance(value, int):
@@ -289,7 +274,9 @@ class Config:
env.filters.update(
{
"format_ts": _format_ts_filter,
- "mxc_to_http": _create_mxc_to_http_filter(self.public_baseurl),
+ "mxc_to_http": _create_mxc_to_http_filter(
+ self.root.server.public_baseurl
+ ),
}
)
@@ -311,8 +298,6 @@ class RootConfig:
config_classes = []
def __init__(self):
- self._configs = OrderedDict()
-
for config_class in self.config_classes:
if config_class.section is None:
raise ValueError("%r requires a section name" % (config_class,))
@@ -321,42 +306,7 @@ class RootConfig:
conf = config_class(self)
except Exception as e:
raise Exception("Failed making %s: %r" % (config_class.section, e))
- self._configs[config_class.section] = conf
-
- def __getattr__(self, item: str) -> Any:
- """
- Redirect lookups on this object either to config objects, or values on
- config objects, so that `config.tls.blah` works, as well as legacy uses
- of things like `config.server.server_name`. It will first look up the config
- section name, and then values on those config classes.
- """
- if item in self._configs.keys():
- return self._configs[item]
-
- return self._get_unclassed_config(None, item)
-
- def _get_unclassed_config(self, asking_section: Optional[str], item: str):
- """
- Fetch a config value from one of the instantiated config classes that
- has not been fetched directly.
-
- Args:
- asking_section: If this check is coming from a Config child, which
- one? This section will not be asked if it has the value.
- item: The configuration value key.
-
- Raises:
- AttributeError if no config classes have the config key. The body
- will contain what sections were checked.
- """
- for key, val in self._configs.items():
- if key == asking_section:
- continue
-
- if item in dir(val):
- return getattr(val, item)
-
- raise AttributeError(item, "not found in %s" % (list(self._configs.keys()),))
+ setattr(self, config_class.section, conf)
def invoke_all(self, func_name: str, *args, **kwargs) -> MutableMapping[str, Any]:
"""
@@ -373,9 +323,11 @@ class RootConfig:
"""
res = OrderedDict()
- for name, config in self._configs.items():
+ for config_class in self.config_classes:
+ config = getattr(self, config_class.section)
+
if hasattr(config, func_name):
- res[name] = getattr(config, func_name)(*args, **kwargs)
+ res[config_class.section] = getattr(config, func_name)(*args, **kwargs)
return res
diff --git a/synapse/config/account_validity.py b/synapse/config/account_validity.py
index ffaffc4931..b56c2a24df 100644
--- a/synapse/config/account_validity.py
+++ b/synapse/config/account_validity.py
@@ -76,7 +76,7 @@ class AccountValidityConfig(Config):
)
if self.account_validity_renew_by_email_enabled:
- if not self.public_baseurl:
+ if not self.root.server.public_baseurl:
raise ConfigError("Can't send renewal emails without 'public_baseurl'")
# Load account validity templates.
diff --git a/synapse/config/cas.py b/synapse/config/cas.py
index 901f4123e1..9b58ecf3d8 100644
--- a/synapse/config/cas.py
+++ b/synapse/config/cas.py
@@ -37,7 +37,7 @@ class CasConfig(Config):
# The public baseurl is required because it is used by the redirect
# template.
- public_baseurl = self.public_baseurl
+ public_baseurl = self.root.server.public_baseurl
if not public_baseurl:
raise ConfigError("cas_config requires a public_baseurl to be set")
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 936abe6178..8ff59aa2f8 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -19,7 +19,6 @@ import email.utils
import logging
import os
from enum import Enum
-from typing import Optional
import attr
@@ -135,7 +134,7 @@ class EmailConfig(Config):
# msisdn is currently always remote while Synapse does not support any method of
# sending SMS messages
ThreepidBehaviour.REMOTE
- if self.account_threepid_delegate_email
+ if self.root.registration.account_threepid_delegate_email
else ThreepidBehaviour.LOCAL
)
# Prior to Synapse v1.4.0, there was another option that defined whether Synapse would
@@ -144,7 +143,7 @@ class EmailConfig(Config):
# identity server in the process.
self.using_identity_server_from_trusted_list = False
if (
- not self.account_threepid_delegate_email
+ not self.root.registration.account_threepid_delegate_email
and config.get("trust_identity_server_for_password_resets", False) is True
):
# Use the first entry in self.trusted_third_party_id_servers instead
@@ -156,7 +155,7 @@ class EmailConfig(Config):
# trusted_third_party_id_servers does not contain a scheme whereas
# account_threepid_delegate_email is expected to. Presume https
- self.account_threepid_delegate_email: Optional[str] = (
+ self.root.registration.account_threepid_delegate_email = (
"https://" + first_trusted_identity_server
)
self.using_identity_server_from_trusted_list = True
@@ -335,7 +334,7 @@ class EmailConfig(Config):
"client_base_url", email_config.get("riot_base_url", None)
)
- if self.account_validity_renew_by_email_enabled:
+ if self.root.account_validity.account_validity_renew_by_email_enabled:
expiry_template_html = email_config.get(
"expiry_template_html", "notice_expiry.html"
)
diff --git a/synapse/config/key.py b/synapse/config/key.py
index 94a9063043..015dbb8a67 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -145,11 +145,13 @@ class KeyConfig(Config):
# list of TrustedKeyServer objects
self.key_servers = list(
- _parse_key_servers(key_servers, self.federation_verify_certificates)
+ _parse_key_servers(
+ key_servers, self.root.tls.federation_verify_certificates
+ )
)
self.macaroon_secret_key = config.get(
- "macaroon_secret_key", self.registration_shared_secret
+ "macaroon_secret_key", self.root.registration.registration_shared_secret
)
if not self.macaroon_secret_key:
diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index 7e67fbada1..10f5796330 100644
--- a/synapse/config/oidc.py
+++ b/synapse/config/oidc.py
@@ -58,7 +58,7 @@ class OIDCConfig(Config):
"Multiple OIDC providers have the idp_id %r." % idp_id
)
- public_baseurl = self.public_baseurl
+ public_baseurl = self.root.server.public_baseurl
if public_baseurl is None:
raise ConfigError("oidc_config requires a public_baseurl to be set")
self.oidc_callback_url = public_baseurl + "_synapse/client/oidc/callback"
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 7cffdacfa5..a3d2a38c4c 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -45,7 +45,10 @@ class RegistrationConfig(Config):
account_threepid_delegates = config.get("account_threepid_delegates") or {}
self.account_threepid_delegate_email = account_threepid_delegates.get("email")
self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
- if self.account_threepid_delegate_msisdn and not self.public_baseurl:
+ if (
+ self.account_threepid_delegate_msisdn
+ and not self.root.server.public_baseurl
+ ):
raise ConfigError(
"The configuration option `public_baseurl` is required if "
"`account_threepid_delegate.msisdn` is set, such that "
@@ -85,7 +88,7 @@ class RegistrationConfig(Config):
if mxid_localpart:
# Convert the localpart to a full mxid.
self.auto_join_user_id = UserID(
- mxid_localpart, self.server_name
+ mxid_localpart, self.root.server.server_name
).to_string()
if self.autocreate_auto_join_rooms:
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 7481f3bf5f..69906a98d4 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -94,7 +94,7 @@ class ContentRepositoryConfig(Config):
# Only enable the media repo if either the media repo is enabled or the
# current worker app is the media repo.
if (
- self.enable_media_repo is False
+ self.root.server.enable_media_repo is False
and config.get("worker_app") != "synapse.app.media_repository"
):
self.can_load_media_repo = False
diff --git a/synapse/config/saml2.py b/synapse/config/saml2.py
index 05e983625d..9c51b6a25a 100644
--- a/synapse/config/saml2.py
+++ b/synapse/config/saml2.py
@@ -199,7 +199,7 @@ class SAML2Config(Config):
"""
import saml2
- public_baseurl = self.public_baseurl
+ public_baseurl = self.root.server.public_baseurl
if public_baseurl is None:
raise ConfigError("saml2_config requires a public_baseurl to be set")
diff --git a/synapse/config/server_notices.py b/synapse/config/server_notices.py
index 48bf3241b6..bde4e879d9 100644
--- a/synapse/config/server_notices.py
+++ b/synapse/config/server_notices.py
@@ -73,7 +73,9 @@ class ServerNoticesConfig(Config):
return
mxid_localpart = c["system_mxid_localpart"]
- self.server_notices_mxid = UserID(mxid_localpart, self.server_name).to_string()
+ self.server_notices_mxid = UserID(
+ mxid_localpart, self.root.server.server_name
+ ).to_string()
self.server_notices_mxid_display_name = c.get("system_mxid_display_name", None)
self.server_notices_mxid_avatar_url = c.get("system_mxid_avatar_url", None)
# todo: i18n
diff --git a/synapse/config/sso.py b/synapse/config/sso.py
index 524a7ff3aa..11a9b76aa0 100644
--- a/synapse/config/sso.py
+++ b/synapse/config/sso.py
@@ -103,8 +103,10 @@ class SSOConfig(Config):
# the client's.
# public_baseurl is an optional setting, so we only add the fallback's URL to the
# list if it's provided (because we can't figure out what that URL is otherwise).
- if self.public_baseurl:
- login_fallback_url = self.public_baseurl + "_matrix/static/client/login"
+ if self.root.server.public_baseurl:
+ login_fallback_url = (
+ self.root.server.public_baseurl + "_matrix/static/client/login"
+ )
self.sso_client_whitelist.append(login_fallback_url)
def generate_config_section(self, **kwargs):
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 5a5f124ddf..87e415df75 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -67,12 +67,8 @@ class AccountValidityHandler:
and self._account_validity_renew_by_email_enabled
):
# Don't do email-specific configuration if renewal by email is disabled.
- self._template_html = (
- hs.config.account_validity.account_validity_template_html
- )
- self._template_text = (
- hs.config.account_validity.account_validity_template_text
- )
+ self._template_html = hs.config.email.account_validity_template_html
+ self._template_text = hs.config.email.account_validity_template_text
self._renew_email_subject = (
hs.config.account_validity.account_validity_renew_email_subject
)
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 0b79dbcf8d..c05461bf2a 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -1499,8 +1499,11 @@ class RoomMemberMasterHandler(RoomMemberHandler):
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
- check_complexity = self.hs.config.limit_remote_rooms.enabled
- if check_complexity and self.hs.config.limit_remote_rooms.admins_can_join:
+ check_complexity = self.hs.config.server.limit_remote_rooms.enabled
+ if (
+ check_complexity
+ and self.hs.config.server.limit_remote_rooms.admins_can_join
+ ):
check_complexity = not await self.auth.is_server_admin(user)
if check_complexity:
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 37769ace48..961c17762e 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -117,7 +117,7 @@ class ReplicationDataHandler:
self._instance_name = hs.get_instance_name()
self._typing_handler = hs.get_typing_handler()
- self._notify_pushers = hs.config.start_pushers
+ self._notify_pushers = hs.config.worker.start_pushers
self._pusher_pool = hs.get_pusherpool()
self._presence_handler = hs.get_presence_handler()
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index d64d1dbacd..6aa9318027 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -171,7 +171,10 @@ class ReplicationCommandHandler:
if hs.config.worker.worker_app is not None:
continue
- if stream.NAME == FederationStream.NAME and hs.config.send_federation:
+ if (
+ stream.NAME == FederationStream.NAME
+ and hs.config.worker.send_federation
+ ):
# We only support federation stream if federation sending
# has been disabled on the master.
continue
@@ -225,7 +228,7 @@ class ReplicationCommandHandler:
self._is_master = hs.config.worker.worker_app is None
self._federation_sender = None
- if self._is_master and not hs.config.send_federation:
+ if self._is_master and not hs.config.worker.send_federation:
self._federation_sender = hs.get_federation_sender()
self._server_notices_sender = None
diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py
index c9ad35a3ad..9c15a04338 100644
--- a/synapse/rest/client/auth.py
+++ b/synapse/rest/client/auth.py
@@ -48,7 +48,7 @@ class AuthRestServlet(RestServlet):
self.auth_handler = hs.get_auth_handler()
self.registration_handler = hs.get_registration_handler()
self.recaptcha_template = hs.config.captcha.recaptcha_template
- self.terms_template = hs.config.terms_template
+ self.terms_template = hs.config.consent.terms_template
self.registration_token_template = (
hs.config.registration.registration_token_template
)
diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py
index ecebc46e8d..6f796d5e50 100644
--- a/synapse/rest/client/push_rule.py
+++ b/synapse/rest/client/push_rule.py
@@ -61,7 +61,9 @@ class PushRuleRestServlet(RestServlet):
self.notifier = hs.get_notifier()
self._is_worker = hs.config.worker.worker_app is not None
- self._users_new_default_push_rules = hs.config.users_new_default_push_rules
+ self._users_new_default_push_rules = (
+ hs.config.server.users_new_default_push_rules
+ )
async def on_PUT(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]:
if self._is_worker:
diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index a7fb8cd848..b81e33964a 100644
--- a/synapse/storage/databases/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -101,7 +101,9 @@ class PushRulesWorkerStore(
prefilled_cache=push_rules_prefill,
)
- self._users_new_default_push_rules = hs.config.users_new_default_push_rules
+ self._users_new_default_push_rules = (
+ hs.config.server.users_new_default_push_rules
+ )
@abc.abstractmethod
def get_max_push_rules_stream_id(self):
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index de262fbf5a..7de4ad7f9b 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -1778,7 +1778,9 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"):
super().__init__(database, db_conn, hs)
- self._ignore_unknown_session_error = hs.config.request_token_inhibit_3pid_errors
+ self._ignore_unknown_session_error = (
+ hs.config.server.request_token_inhibit_3pid_errors
+ )
self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id")
self._refresh_tokens_id_gen = IdGenerator(db_conn, "refresh_tokens", "id")
diff --git a/tests/config/test_base.py b/tests/config/test_base.py
index baa5313fb3..6a52f862f4 100644
--- a/tests/config/test_base.py
+++ b/tests/config/test_base.py
@@ -14,23 +14,28 @@
import os.path
import tempfile
+from unittest.mock import Mock
from synapse.config import ConfigError
+from synapse.config._base import Config
from synapse.util.stringutils import random_string
from tests import unittest
-class BaseConfigTestCase(unittest.HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
- self.hs = hs
+class BaseConfigTestCase(unittest.TestCase):
+ def setUp(self):
+ # The root object needs a server property with a public_baseurl.
+ root = Mock()
+ root.server.public_baseurl = "http://test"
+ self.config = Config(root)
def test_loading_missing_templates(self):
# Use a temporary directory that exists on the system, but that isn't likely to
# contain template files
with tempfile.TemporaryDirectory() as tmp_dir:
# Attempt to load an HTML template from our custom template directory
- template = self.hs.config.read_templates(["sso_error.html"], (tmp_dir,))[0]
+ template = self.config.read_templates(["sso_error.html"], (tmp_dir,))[0]
# If no errors, we should've gotten the default template instead
@@ -60,7 +65,7 @@ class BaseConfigTestCase(unittest.HomeserverTestCase):
# Attempt to load the template from our custom template directory
template = (
- self.hs.config.read_templates([template_filename], (tmp_dir,))
+ self.config.read_templates([template_filename], (tmp_dir,))
)[0]
# Render the template
@@ -97,7 +102,7 @@ class BaseConfigTestCase(unittest.HomeserverTestCase):
# Retrieve the template.
template = (
- self.hs.config.read_templates(
+ self.config.read_templates(
[template_filename],
(td.name for td in tempdirs),
)
@@ -118,7 +123,7 @@ class BaseConfigTestCase(unittest.HomeserverTestCase):
# Retrieve the template.
template = (
- self.hs.config.read_templates(
+ self.config.read_templates(
[other_template_name],
(td.name for td in tempdirs),
)
@@ -134,6 +139,6 @@ class BaseConfigTestCase(unittest.HomeserverTestCase):
def test_loading_template_from_nonexistent_custom_directory(self):
with self.assertRaises(ConfigError):
- self.hs.config.read_templates(
+ self.config.read_templates(
["some_filename.html"], ("a_nonexistent_directory",)
)
diff --git a/tests/config/test_cache.py b/tests/config/test_cache.py
index 857d9cd096..f518abdb7a 100644
--- a/tests/config/test_cache.py
+++ b/tests/config/test_cache.py
@@ -12,39 +12,32 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.config._base import Config, RootConfig
from synapse.config.cache import CacheConfig, add_resizable_cache
from synapse.util.caches.lrucache import LruCache
from tests.unittest import TestCase
-class FakeServer(Config):
- section = "server"
-
-
-class TestConfig(RootConfig):
- config_classes = [FakeServer, CacheConfig]
-
-
class CacheConfigTests(TestCase):
def setUp(self):
# Reset caches before each test
- TestConfig().caches.reset()
+ self.config = CacheConfig()
+
+ def tearDown(self):
+ self.config.reset()
def test_individual_caches_from_environ(self):
"""
Individual cache factors will be loaded from the environment.
"""
config = {}
- t = TestConfig()
- t.caches._environ = {
+ self.config._environ = {
"SYNAPSE_CACHE_FACTOR_SOMETHING_OR_OTHER": "2",
"SYNAPSE_NOT_CACHE": "BLAH",
}
- t.read_config(config, config_dir_path="", data_dir_path="")
+ self.config.read_config(config, config_dir_path="", data_dir_path="")
- self.assertEqual(dict(t.caches.cache_factors), {"something_or_other": 2.0})
+ self.assertEqual(dict(self.config.cache_factors), {"something_or_other": 2.0})
def test_config_overrides_environ(self):
"""
@@ -52,15 +45,14 @@ class CacheConfigTests(TestCase):
over those in the config.
"""
config = {"caches": {"per_cache_factors": {"foo": 2, "bar": 3}}}
- t = TestConfig()
- t.caches._environ = {
+ self.config._environ = {
"SYNAPSE_CACHE_FACTOR_SOMETHING_OR_OTHER": "2",
"SYNAPSE_CACHE_FACTOR_FOO": 1,
}
- t.read_config(config, config_dir_path="", data_dir_path="")
+ self.config.read_config(config, config_dir_path="", data_dir_path="")
self.assertEqual(
- dict(t.caches.cache_factors),
+ dict(self.config.cache_factors),
{"foo": 1.0, "bar": 3.0, "something_or_other": 2.0},
)
@@ -76,8 +68,7 @@ class CacheConfigTests(TestCase):
self.assertEqual(cache.max_size, 50)
config = {"caches": {"per_cache_factors": {"foo": 3}}}
- t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
+ self.config.read_config(config)
self.assertEqual(cache.max_size, 300)
@@ -88,8 +79,7 @@ class CacheConfigTests(TestCase):
there is one.
"""
config = {"caches": {"per_cache_factors": {"foo": 2}}}
- t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
+ self.config.read_config(config, config_dir_path="", data_dir_path="")
cache = LruCache(100)
add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor)
@@ -106,8 +96,7 @@ class CacheConfigTests(TestCase):
self.assertEqual(cache.max_size, 50)
config = {"caches": {"global_factor": 4}}
- t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
+ self.config.read_config(config, config_dir_path="", data_dir_path="")
self.assertEqual(cache.max_size, 400)
@@ -118,8 +107,7 @@ class CacheConfigTests(TestCase):
is no per-cache factor.
"""
config = {"caches": {"global_factor": 1.5}}
- t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
+ self.config.read_config(config, config_dir_path="", data_dir_path="")
cache = LruCache(100)
add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor)
@@ -133,12 +121,11 @@ class CacheConfigTests(TestCase):
"per_cache_factors": {"*cache_a*": 5, "cache_b": 6, "cache_c": 2}
}
}
- t = TestConfig()
- t.caches._environ = {
+ self.config._environ = {
"SYNAPSE_CACHE_FACTOR_CACHE_A": "2",
"SYNAPSE_CACHE_FACTOR_CACHE_B": 3,
}
- t.read_config(config, config_dir_path="", data_dir_path="")
+ self.config.read_config(config, config_dir_path="", data_dir_path="")
cache_a = LruCache(100)
add_resizable_cache("*cache_a*", cache_resize_callback=cache_a.set_cache_factor)
@@ -158,11 +145,10 @@ class CacheConfigTests(TestCase):
"""
config = {"caches": {"event_cache_size": "10k"}}
- t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
+ self.config.read_config(config, config_dir_path="", data_dir_path="")
cache = LruCache(
- max_size=t.caches.event_cache_size,
+ max_size=self.config.event_cache_size,
apply_cache_factor_from_config=False,
)
add_resizable_cache("event_cache", cache_resize_callback=cache.set_cache_factor)
diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index 8e49ca26d9..59635de205 100644
--- a/tests/config/test_load.py
+++ b/tests/config/test_load.py
@@ -49,7 +49,7 @@ class ConfigLoadingTestCase(unittest.TestCase):
config = HomeServerConfig.load_config("", ["-c", self.file])
self.assertTrue(
- hasattr(config, "macaroon_secret_key"),
+ hasattr(config.key, "macaroon_secret_key"),
"Want config to have attr macaroon_secret_key",
)
if len(config.key.macaroon_secret_key) < 5:
@@ -60,7 +60,7 @@ class ConfigLoadingTestCase(unittest.TestCase):
config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
self.assertTrue(
- hasattr(config, "macaroon_secret_key"),
+ hasattr(config.key, "macaroon_secret_key"),
"Want config to have attr macaroon_secret_key",
)
if len(config.key.macaroon_secret_key) < 5:
@@ -74,8 +74,12 @@ class ConfigLoadingTestCase(unittest.TestCase):
config1 = HomeServerConfig.load_config("", ["-c", self.file])
config2 = HomeServerConfig.load_config("", ["-c", self.file])
config3 = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
- self.assertEqual(config1.macaroon_secret_key, config2.macaroon_secret_key)
- self.assertEqual(config1.macaroon_secret_key, config3.macaroon_secret_key)
+ self.assertEqual(
+ config1.key.macaroon_secret_key, config2.key.macaroon_secret_key
+ )
+ self.assertEqual(
+ config1.key.macaroon_secret_key, config3.key.macaroon_secret_key
+ )
def test_disable_registration(self):
self.generate_config()
diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py
index b6bc1876b5..9ba5781573 100644
--- a/tests/config/test_tls.py
+++ b/tests/config/test_tls.py
@@ -42,9 +42,9 @@ class TLSConfigTests(TestCase):
"""
config = {}
t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
+ t.tls.read_config(config, config_dir_path="", data_dir_path="")
- self.assertEqual(t.federation_client_minimum_tls_version, "1")
+ self.assertEqual(t.tls.federation_client_minimum_tls_version, "1")
def test_tls_client_minimum_set(self):
"""
@@ -52,29 +52,29 @@ class TLSConfigTests(TestCase):
"""
config = {"federation_client_minimum_tls_version": 1}
t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
- self.assertEqual(t.federation_client_minimum_tls_version, "1")
+ t.tls.read_config(config, config_dir_path="", data_dir_path="")
+ self.assertEqual(t.tls.federation_client_minimum_tls_version, "1")
config = {"federation_client_minimum_tls_version": 1.1}
t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
- self.assertEqual(t.federation_client_minimum_tls_version, "1.1")
+ t.tls.read_config(config, config_dir_path="", data_dir_path="")
+ self.assertEqual(t.tls.federation_client_minimum_tls_version, "1.1")
config = {"federation_client_minimum_tls_version": 1.2}
t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
- self.assertEqual(t.federation_client_minimum_tls_version, "1.2")
+ t.tls.read_config(config, config_dir_path="", data_dir_path="")
+ self.assertEqual(t.tls.federation_client_minimum_tls_version, "1.2")
# Also test a string version
config = {"federation_client_minimum_tls_version": "1"}
t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
- self.assertEqual(t.federation_client_minimum_tls_version, "1")
+ t.tls.read_config(config, config_dir_path="", data_dir_path="")
+ self.assertEqual(t.tls.federation_client_minimum_tls_version, "1")
config = {"federation_client_minimum_tls_version": "1.2"}
t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
- self.assertEqual(t.federation_client_minimum_tls_version, "1.2")
+ t.tls.read_config(config, config_dir_path="", data_dir_path="")
+ self.assertEqual(t.tls.federation_client_minimum_tls_version, "1.2")
def test_tls_client_minimum_1_point_3_missing(self):
"""
@@ -91,7 +91,7 @@ class TLSConfigTests(TestCase):
config = {"federation_client_minimum_tls_version": 1.3}
t = TestConfig()
with self.assertRaises(ConfigError) as e:
- t.read_config(config, config_dir_path="", data_dir_path="")
+ t.tls.read_config(config, config_dir_path="", data_dir_path="")
self.assertEqual(
e.exception.args[0],
(
@@ -112,8 +112,8 @@ class TLSConfigTests(TestCase):
config = {"federation_client_minimum_tls_version": 1.3}
t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
- self.assertEqual(t.federation_client_minimum_tls_version, "1.3")
+ t.tls.read_config(config, config_dir_path="", data_dir_path="")
+ self.assertEqual(t.tls.federation_client_minimum_tls_version, "1.3")
def test_tls_client_minimum_set_passed_through_1_2(self):
"""
@@ -121,7 +121,7 @@ class TLSConfigTests(TestCase):
"""
config = {"federation_client_minimum_tls_version": 1.2}
t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
+ t.tls.read_config(config, config_dir_path="", data_dir_path="")
cf = FederationPolicyForHTTPS(t)
options = _get_ssl_context_options(cf._verify_ssl_context)
@@ -137,7 +137,7 @@ class TLSConfigTests(TestCase):
"""
config = {"federation_client_minimum_tls_version": 1}
t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
+ t.tls.read_config(config, config_dir_path="", data_dir_path="")
cf = FederationPolicyForHTTPS(t)
options = _get_ssl_context_options(cf._verify_ssl_context)
@@ -159,7 +159,7 @@ class TLSConfigTests(TestCase):
}
t = TestConfig()
e = self.assertRaises(
- ConfigError, t.read_config, config, config_dir_path="", data_dir_path=""
+ ConfigError, t.tls.read_config, config, config_dir_path="", data_dir_path=""
)
self.assertIn("IDNA domain names", str(e))
@@ -174,7 +174,7 @@ class TLSConfigTests(TestCase):
]
}
t = TestConfig()
- t.read_config(config, config_dir_path="", data_dir_path="")
+ t.tls.read_config(config, config_dir_path="", data_dir_path="")
cf = FederationPolicyForHTTPS(t)
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index cf9748f218..f26d5acf9c 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -126,7 +126,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
self.db_pool = database._db_pool
self.engine = database.engine
- db_config = hs.config.get_single_database()
+ db_config = hs.config.database.get_single_database()
self.store = TestTransactionStore(
database, make_conn(db_config, self.engine, "test"), hs
)
diff --git a/tests/storage/test_txn_limit.py b/tests/storage/test_txn_limit.py
index 6ff3ebb137..ace82cbf42 100644
--- a/tests/storage/test_txn_limit.py
+++ b/tests/storage/test_txn_limit.py
@@ -22,7 +22,7 @@ class SQLTransactionLimitTestCase(unittest.HomeserverTestCase):
return self.setup_test_homeserver(db_txn_limit=1000)
def test_config(self):
- db_config = self.hs.config.get_single_database()
+ db_config = self.hs.config.database.get_single_database()
self.assertEqual(db_config.config["txn_limit"], 1000)
def test_select(self):
--
cgit 1.5.1
From 4e5162106436f3fddd12561d316d19fd23148800 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 6 Oct 2021 17:18:13 +0200
Subject: Add a spamchecker method to allow or deny 3pid invites (#10894)
This is in the context of creating new module callbacks that modules in https://github.com/matrix-org/synapse-dinsic can use, in an effort to reconcile the spam checker API in synapse-dinsic with the one in mainline.
Note that a module callback already exists for 3pid invites (https://matrix-org.github.io/synapse/develop/modules/third_party_rules_callbacks.html#check_threepid_can_be_invited) but it doesn't check whether the sender of the invite is allowed to send it.
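For illustration, a minimal module using the new callback might look like the
sketch below. The class name, config key, and allow-list policy are invented
for the example, and it assumes registration through the module API's standard
`register_spam_checker_callbacks` hook.

```python
from typing import Any, Dict

from synapse.module_api import ModuleApi


class DomainAllowListSpamChecker:
    """Hypothetical module: deny email 3PID invites outside an allowed domain."""

    def __init__(self, config: Dict[str, Any], api: ModuleApi):
        self._allowed_domain = config.get("allowed_domain", "example.com")
        api.register_spam_checker_callbacks(
            user_may_send_3pid_invite=self.user_may_send_3pid_invite,
        )

    async def user_may_send_3pid_invite(
        self, inviter: str, medium: str, address: str, room_id: str
    ) -> bool:
        # Only restrict email invites; let other media through.
        if medium != "email":
            return True
        return address.endswith("@" + self._allowed_domain)
```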
---
changelog.d/10894.feature | 1 +
docs/modules/spam_checker_callbacks.md | 35 +++++++++++++++++
synapse/events/spamcheck.py | 35 +++++++++++++++++
synapse/handlers/room_member.py | 12 ++++++
tests/rest/client/test_rooms.py | 70 ++++++++++++++++++++++++++++++++++
5 files changed, 153 insertions(+)
create mode 100644 changelog.d/10894.feature
diff --git a/changelog.d/10894.feature b/changelog.d/10894.feature
new file mode 100644
index 0000000000..a4f968bed1
--- /dev/null
+++ b/changelog.d/10894.feature
@@ -0,0 +1 @@
+Add a `user_may_send_3pid_invite` spam checker callback for modules to allow or deny 3PID invites.
diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
index 92376df993..787e99074a 100644
--- a/docs/modules/spam_checker_callbacks.md
+++ b/docs/modules/spam_checker_callbacks.md
@@ -44,6 +44,41 @@ Called when processing an invitation. The module must return a `bool` indicating
the inviter can invite the invitee to the given room. Both inviter and invitee are
represented by their Matrix user ID (e.g. `@alice:example.com`).
+### `user_may_send_3pid_invite`
+
+```python
+async def user_may_send_3pid_invite(
+ inviter: str,
+ medium: str,
+ address: str,
+ room_id: str,
+) -> bool
+```
+
+Called when processing an invitation using a third-party identifier (also called a 3PID,
+e.g. an email address or a phone number). The module must return a `bool` indicating
+whether the inviter can invite the invitee to the given room.
+
+The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the
+invitee is represented by its medium (e.g. "email") and its address
+(e.g. `alice@example.com`). See [the Matrix specification](https://matrix.org/docs/spec/appendices#pid-types)
+for more information regarding third-party identifiers.
+
+For example, a call to this callback to send an invitation to the email address
+`alice@example.com` would look like this:
+
+```python
+await user_may_send_3pid_invite(
+ "@bob:example.com", # The inviter's user ID
+ "email", # The medium of the 3PID to invite
+ "alice@example.com", # The address of the 3PID to invite
+ "!some_room:example.com", # The ID of the room to send the invite into
+)
+```
+
+**Note**: If the third-party identifier is already associated with a Matrix user ID,
+[`user_may_invite`](#user_may_invite) will be used instead.
+
### `user_may_create_room`
```python
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index ec8863e397..ae4c8ab257 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -46,6 +46,7 @@ CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
]
USER_MAY_JOIN_ROOM_CALLBACK = Callable[[str, str, bool], Awaitable[bool]]
USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]]
+USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[[str, str, str, str], Awaitable[bool]]
USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]]
USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK = Callable[
[str, List[str], List[Dict[str, str]]], Awaitable[bool]
@@ -168,6 +169,9 @@ class SpamChecker:
self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = []
self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
+ self._user_may_send_3pid_invite_callbacks: List[
+ USER_MAY_SEND_3PID_INVITE_CALLBACK
+ ] = []
self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = []
self._user_may_create_room_with_invites_callbacks: List[
USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK
@@ -191,6 +195,7 @@ class SpamChecker:
check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None,
user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
+ user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None,
user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None,
user_may_create_room_with_invites: Optional[
USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK
@@ -215,6 +220,11 @@ class SpamChecker:
if user_may_invite is not None:
self._user_may_invite_callbacks.append(user_may_invite)
+ if user_may_send_3pid_invite is not None:
+ self._user_may_send_3pid_invite_callbacks.append(
+ user_may_send_3pid_invite,
+ )
+
if user_may_create_room is not None:
self._user_may_create_room_callbacks.append(user_may_create_room)
@@ -304,6 +314,31 @@ class SpamChecker:
return True
+ async def user_may_send_3pid_invite(
+ self, inviter_userid: str, medium: str, address: str, room_id: str
+ ) -> bool:
+ """Checks if a given user may invite a given threepid into the room
+
+ If this method returns false, the threepid invite will be rejected.
+
+ Note that if the threepid is already associated with a Matrix user ID, Synapse
+ will call user_may_invite with said user ID instead.
+
+ Args:
+ inviter_userid: The user ID of the sender of the invitation
+ medium: The 3PID's medium (e.g. "email")
+ address: The 3PID's address (e.g. "alice@example.com")
+ room_id: The room ID
+
+ Returns:
+ True if the user may send the invite, otherwise False
+ """
+ for callback in self._user_may_send_3pid_invite_callbacks:
+ if await callback(inviter_userid, medium, address, room_id) is False:
+ return False
+
+ return True
+
async def user_may_create_room(self, userid: str) -> bool:
"""Checks if a given user may create a room
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index c05461bf2a..eef337feeb 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -1299,10 +1299,22 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if invitee:
# Note that update_membership with an action of "invite" can raise
# a ShadowBanError, but this was done above already.
+ # We don't check the invite against the spamchecker(s) here (through
+ # user_may_invite) because we'll do it further down the line anyway (in
+ # update_membership_locked).
_, stream_id = await self.update_membership(
requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id
)
else:
+ # Check if the spamchecker(s) allow this invite to go through.
+ if not await self.spam_checker.user_may_send_3pid_invite(
+ inviter_userid=requester.user.to_string(),
+ medium=medium,
+ address=address,
+ room_id=room_id,
+ ):
+ raise SynapseError(403, "Cannot send threepid invite")
+
stream_id = await self._make_and_store_3pid_invite(
requester,
id_server,
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index a41ec6a98f..376853fd65 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -2531,3 +2531,73 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase):
"""An alias which does not point to the room raises a SynapseError."""
self._set_canonical_alias({"alias": "@unknown:test"}, expected_code=400)
self._set_canonical_alias({"alt_aliases": ["@unknown:test"]}, expected_code=400)
+
+
+class ThreepidInviteTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def prepare(self, reactor, clock, homeserver):
+ self.user_id = self.register_user("thomas", "hackme")
+ self.tok = self.login("thomas", "hackme")
+
+ self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok)
+
+ def test_threepid_invite_spamcheck(self):
+        # Mock a few functions to prevent the test from failing due to being unable
+        # to talk to a remote identity server. We keep a reference to the mock for
+        # _make_and_store_3pid_invite so we can check its call_count later in the test.
+ make_invite_mock = Mock(return_value=make_awaitable(0))
+ self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock
+ self.hs.get_identity_handler().lookup_3pid = Mock(
+ return_value=make_awaitable(None),
+ )
+
+ # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it
+ # allow everything for now.
+ mock = Mock(return_value=make_awaitable(True))
+ self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock)
+
+ # Send a 3PID invite into the room and check that it succeeded.
+ email_to_invite = "teresa@example.com"
+ channel = self.make_request(
+ method="POST",
+ path="/rooms/" + self.room_id + "/invite",
+ content={
+ "id_server": "example.com",
+ "id_access_token": "sometoken",
+ "medium": "email",
+ "address": email_to_invite,
+ },
+ access_token=self.tok,
+ )
+ self.assertEquals(channel.code, 200)
+
+ # Check that the callback was called with the right params.
+ mock.assert_called_with(self.user_id, "email", email_to_invite, self.room_id)
+
+ # Check that the call to send the invite was made.
+ make_invite_mock.assert_called_once()
+
+ # Now change the return value of the callback to deny any invite and test that
+ # we can't send the invite.
+ mock.return_value = make_awaitable(False)
+ channel = self.make_request(
+ method="POST",
+ path="/rooms/" + self.room_id + "/invite",
+ content={
+ "id_server": "example.com",
+ "id_access_token": "sometoken",
+ "medium": "email",
+ "address": email_to_invite,
+ },
+ access_token=self.tok,
+ )
+ self.assertEquals(channel.code, 403)
+
+ # Also check that it stopped before calling _make_and_store_3pid_invite.
+ make_invite_mock.assert_called_once()
--
cgit 1.5.1
From e564bdd1276d8eb8ea3eabc0442a58fb18cd8731 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 6 Oct 2021 18:09:35 +0100
Subject: Add content to the Synapse documentation intro page (#10990)
Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
changelog.d/10990.doc | 1 +
docs/development/contributing_guide.md | 2 +-
docs/welcome_and_overview.md | 74 ++++++++++++++++++++++++++++++++++
3 files changed, 76 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/10990.doc
diff --git a/changelog.d/10990.doc b/changelog.d/10990.doc
new file mode 100644
index 0000000000..51290d6200
--- /dev/null
+++ b/changelog.d/10990.doc
@@ -0,0 +1 @@
+Add additional content to the Welcome and Overview page of the documentation.
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 713366368c..580a4f7f98 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -63,7 +63,7 @@ TBD
# 5. Get in touch.
-Join our developer community on Matrix: #synapse-dev:matrix.org !
+Join our developer community on Matrix: [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)!
# 6. Pick an issue.
diff --git a/docs/welcome_and_overview.md b/docs/welcome_and_overview.md
index 9882d9f159..aab2d6b4f0 100644
--- a/docs/welcome_and_overview.md
+++ b/docs/welcome_and_overview.md
@@ -3,3 +3,77 @@
Welcome to the documentation repository for Synapse, a
[Matrix](https://matrix.org) homeserver implementation developed by the matrix.org core
team.
+
+## Installing and using Synapse
+
+This documentation covers topics for **installation**, **configuration** and
+**maintenance** of your Synapse process:
+
+* Learn how to [install](setup/installation.md) and
+ [configure](usage/configuration/index.html) your own instance, perhaps with [Single
+ Sign-On](usage/configuration/user_authentication/index.html).
+
+* See how to [upgrade](upgrade.md) between Synapse versions.
+
+* Administer your instance using the [Admin
+ API](usage/administration/admin_api/index.html), installing [pluggable
+ modules](modules/index.html), or by accessing the [manhole](manhole.md).
+
+* Learn how to [read log lines](usage/administration/request_log.md), configure
+ [logging](usage/configuration/logging_sample_config.md) or set up [structured
+ logging](structured_logging.md).
+
+* Scale Synapse through additional [worker processes](workers.md).
+
+* Set up [monitoring and metrics](metrics-howto.md) to keep an eye on your
+ Synapse instance's performance.
+
+## Developing on Synapse
+
+Contributions are welcome! Synapse is primarily written in
+[Python](https://python.org). As a developer, you may be interested in the
+following documentation:
+
+* Read the [Contributing Guide](development/contributing_guide.md). It is meant
+ to walk new contributors through the process of developing and submitting a
+ change to the Synapse codebase (which is [hosted on
+ GitHub](https://github.com/matrix-org/synapse)).
+
+* Set up your [development
+ environment](development/contributing_guide.md#2-what-do-i-need), then learn
+ how to [lint](development/contributing_guide.md#run-the-linters) and
+ [test](development/contributing_guide.md#8-test-test-test) your code.
+
+* Look at [the issue tracker](https://github.com/matrix-org/synapse/issues) for
+ bugs to fix or features to add. If you're new, it may be best to start with
+ those labeled [good first
+ issue](https://github.com/matrix-org/synapse/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22).
+
+* Understand [how Synapse is
+ built](development/internal_documentation/index.html), how to [migrate
+ database schemas](development/database_schema.md), learn about
+ [federation](federate.md) and how to [set up a local
+ federation](federate.md#running-a-demo-federation-of-synapses) for development.
+
+* We like to keep our `git` history clean. [Learn](development/git.md) how to
+ do so!
+
+* And finally, contribute to this documentation! The source for which is
+ [located here](https://github.com/matrix-org/synapse/tree/develop/docs).
+
+## Donating to Synapse development
+
+Want to help keep Synapse going but don't know how to code? Synapse is a
+[Matrix.org Foundation](https://matrix.org) project. Consider becoming a
+supporter on [Liberapay](https://liberapay.com/matrixdotorg),
+[Patreon](https://patreon.com/matrixdotorg) or through
+[PayPal](https://paypal.me/matrixdotorg) via a one-time donation.
+
+If you are an organisation or enterprise and would like to sponsor development,
+reach out to us over email at: support (at) matrix.org
+
+## Reporting a security vulnerability
+
+If you've found a security issue in Synapse or any other Matrix.org Foundation
+project, please report it to us in accordance with our [Security Disclosure
+Policy](https://www.matrix.org/security-disclosure-policy/). Thank you!
--
cgit 1.5.1
From f563676c097b830346acc7a4ce3e910c6b10c4c3 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Wed, 6 Oct 2021 18:55:25 +0100
Subject: `disallow-untyped-defs` for `synapse.state` (#11004)
* `disallow-untyped-defs` for `synapse.state`
Much smaller than I was expecting!
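As a sketch of what the flag enforces, using the `__len__` change from this
patch as the example:

```python
from typing import Dict


class _StateCacheEntry:
    def __init__(self, state: Dict[str, str]) -> None:
        self.state = state

    # With `disallow_untyped_defs = True`, mypy rejects the unannotated
    # `def __len__(self):` ("Function is missing a return type annotation");
    # adding the return type, as this commit does, satisfies the check.
    def __len__(self) -> int:
        return len(self.state)
```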
---
changelog.d/11004.misc | 1 +
mypy.ini | 3 +++
synapse/state/__init__.py | 2 +-
synapse/state/v1.py | 4 ++--
synapse/state/v2.py | 2 +-
5 files changed, 8 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/11004.misc
diff --git a/changelog.d/11004.misc b/changelog.d/11004.misc
new file mode 100644
index 0000000000..821033710a
--- /dev/null
+++ b/changelog.d/11004.misc
@@ -0,0 +1 @@
+Add further type hints to `synapse.state`.
\ No newline at end of file
diff --git a/mypy.ini b/mypy.ini
index 86459bdcb6..a052d49c71 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -99,6 +99,9 @@ disallow_untyped_defs = True
[mypy-synapse.rest.*]
disallow_untyped_defs = True
+[mypy-synapse.state.*]
+disallow_untyped_defs = True
+
[mypy-synapse.util.batching_queue]
disallow_untyped_defs = True
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index c981df3f18..5cf2e12575 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -118,7 +118,7 @@ class _StateCacheEntry:
else:
self.state_id = _gen_state_id()
- def __len__(self):
+ def __len__(self) -> int:
return len(self.state)
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index 017e6fd92d..ffe6207a3c 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -225,7 +225,7 @@ def _resolve_with_state(
conflicted_state_ids: StateMap[Set[str]],
auth_event_ids: StateMap[str],
state_map: Dict[str, EventBase],
-):
+) -> MutableStateMap[str]:
conflicted_state = {}
for key, event_ids in conflicted_state_ids.items():
events = [state_map[ev_id] for ev_id in event_ids if ev_id in state_map]
@@ -362,7 +362,7 @@ def _resolve_normal_events(
def _ordered_events(events: Iterable[EventBase]) -> List[EventBase]:
- def key_func(e):
+ def key_func(e: EventBase) -> Tuple[int, str]:
# we have to use utf-8 rather than ascii here because it turns out we allow
# people to send us events with non-ascii event IDs :/
return -int(e.depth), hashlib.sha1(e.event_id.encode("utf-8")).hexdigest()
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 586b0e12fe..bd18eefd58 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -481,7 +481,7 @@ async def _reverse_topological_power_sort(
if idx % _AWAIT_AFTER_ITERATIONS == 0:
await clock.sleep(0)
- def _get_power_order(event_id):
+ def _get_power_order(event_id: str) -> Tuple[int, int, str]:
ev = event_map[event_id]
pl = event_to_pl[event_id]
--
cgit 1.5.1
From 52aefd50860f9b44f48a9b465d42f26faa4eb84f Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Thu, 7 Oct 2021 12:37:10 +0200
Subject: Catch AttributeErrors when calling registerProducer (#10995)
Looks like the wrong exception type was caught in #10932.
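A minimal sketch of the failure mode, with an invented `FakeRequest` standing
in for Twisted's `Request`: once the connection is lost, `request.channel` is
`None`, so the delegated call raises `AttributeError`, not `RuntimeError`.

```python
class FakeRequest:
    channel = None  # the connection has already gone away

    def registerProducer(self, producer: object, streaming: bool) -> None:
        # Twisted's Request delegates to its HTTP channel, which is None here.
        self.channel.registerProducer(producer, streaming)


try:
    FakeRequest().registerProducer(object(), True)
except AttributeError as e:
    print("Connection disconnected before response was written: %r" % (e,))
```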
---
changelog.d/10995.bugfix | 1 +
synapse/http/server.py | 5 ++++-
2 files changed, 5 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/10995.bugfix
diff --git a/changelog.d/10995.bugfix b/changelog.d/10995.bugfix
new file mode 100644
index 0000000000..3eef96f3db
--- /dev/null
+++ b/changelog.d/10995.bugfix
@@ -0,0 +1 @@
+Correct a bugfix introduced in Synapse v1.44.0 that would not catch every error raised when the connection breaks before a response can be written to it.
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 0df1bfbeef..897ba5e453 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -563,7 +563,10 @@ class _ByteProducer:
try:
self._request.registerProducer(self, True)
- except RuntimeError as e:
+ except AttributeError as e:
+ # Calling self._request.registerProducer might raise an AttributeError since
+ # the underlying Twisted code calls self._request.channel.registerProducer,
+ # however self._request.channel will be None if the connection was lost.
logger.info("Connection disconnected before response was written: %r", e)
# We drop our references to data we'll not use.
--
cgit 1.5.1
From 86af6b2f0ef92a317900fd4a4f6d3436ff8a011c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 7 Oct 2021 12:20:03 +0100
Subject: Add a comment in _process_received_pdu (#11011)
---
changelog.d/11011.misc | 1 +
synapse/handlers/federation_event.py | 3 +++
2 files changed, 4 insertions(+)
create mode 100644 changelog.d/11011.misc
diff --git a/changelog.d/11011.misc b/changelog.d/11011.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/11011.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 243be46267..0645ce9392 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -894,6 +894,9 @@ class FederationEventHandler:
backfilled=backfilled,
)
except AuthError as e:
+ # FIXME richvdh 2021/10/07 I don't think this is reachable. Let's log it
+ # for now
+ logger.exception("Unexpected AuthError from _check_event_auth")
raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
await self._run_push_actions_and_persist_event(event, context, backfilled)
--
cgit 1.5.1
From 96fe77c2546598449c1d423c125f84c92620b155 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 7 Oct 2021 12:43:25 +0100
Subject: Improve the logging in _auth_and_persist_outliers (#11010)
Include the event IDs being persisted
---
changelog.d/11010.misc | 1 +
synapse/handlers/federation_event.py | 5 ++++-
2 files changed, 5 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/11010.misc
diff --git a/changelog.d/11010.misc b/changelog.d/11010.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/11010.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 0645ce9392..f640b417b3 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -1161,7 +1161,10 @@ class FederationEventHandler:
return
logger.info(
- "Persisting %i of %i remaining events", len(roots), len(event_map)
+ "Persisting %i of %i remaining outliers: %s",
+ len(roots),
+ len(event_map),
+ shortstr(e.event_id for e in roots),
)
await self._auth_and_persist_fetched_events_inner(origin, room_id, roots)
--
cgit 1.5.1
From e0bf34dada709776ae00843e47cd811d1cd195c6 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 7 Oct 2021 13:26:11 +0100
Subject: Don't alter directory entries for local users when setting a per-room
nickname (#11002)
Co-authored-by: Patrick Cloke
---
changelog.d/11002.bugfix | 1 +
synapse/handlers/user_directory.py | 20 +++++++++++++-------
tests/handlers/test_user_directory.py | 34 ++++++++++++++++++++++++++++++++++
3 files changed, 48 insertions(+), 7 deletions(-)
create mode 100644 changelog.d/11002.bugfix
diff --git a/changelog.d/11002.bugfix b/changelog.d/11002.bugfix
new file mode 100644
index 0000000000..cf894a6314
--- /dev/null
+++ b/changelog.d/11002.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where local users' per-room nicknames/avatars were visible to anyone who could see them in the user directory.
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 97f60b5806..b7b1973346 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -203,6 +203,7 @@ class UserDirectoryHandler(StateDeltasHandler):
public_value=Membership.JOIN,
)
+ is_remote = not self.is_mine_id(state_key)
if change is MatchChange.now_false:
# Need to check if the server left the room entirely, if so
# we might need to remove all the users in that room
@@ -224,15 +225,20 @@ class UserDirectoryHandler(StateDeltasHandler):
else:
logger.debug("Server is still in room: %r", room_id)
- include_in_dir = not self.is_mine_id(
- state_key
- ) or await self.store.should_include_local_user_in_dir(state_key)
+ include_in_dir = (
+ is_remote
+ or await self.store.should_include_local_user_in_dir(state_key)
+ )
if include_in_dir:
if change is MatchChange.no_change:
- # Handle any profile changes
- await self._handle_profile_change(
- state_key, room_id, prev_event_id, event_id
- )
+ # Handle any profile changes for remote users.
+ # (For local users we are not forced to scan membership
+ # events; instead the rest of the application calls
+ # `handle_local_profile_change`.)
+ if is_remote:
+ await self._handle_profile_change(
+ state_key, room_id, prev_event_id, event_id
+ )
continue
if change is MatchChange.now_true: # The user joined
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 03fd5a3e2c..47217f0542 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -402,6 +402,40 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
public3 = self.get_success(self.user_dir_helper.get_users_in_public_rooms())
self.assertEqual(set(public3), {(alice, room2), (bob, room2)})
+ def test_per_room_profile_doesnt_alter_directory_entry(self) -> None:
+ alice = self.register_user("alice", "pass")
+ alice_token = self.login(alice, "pass")
+ bob = self.register_user("bob", "pass")
+
+ # Alice should have a user directory entry created at registration.
+ users = self.get_success(self.user_dir_helper.get_profiles_in_user_directory())
+ self.assertEqual(
+ users[alice], ProfileInfo(display_name="alice", avatar_url=None)
+ )
+
+ # Alice makes a room for herself.
+ room = self.helper.create_room_as(alice, is_public=True, tok=alice_token)
+
+ # Alice sets a nickname unique to that room.
+ self.helper.send_state(
+ room,
+ "m.room.member",
+ {
+ "displayname": "Freddy Mercury",
+ "membership": "join",
+ },
+ alice_token,
+ state_key=alice,
+ )
+
+ # Alice's display name remains the same in the user directory.
+ search_result = self.get_success(self.handler.search_users(bob, alice, 10))
+ self.assertEqual(
+ search_result["results"],
+ [{"display_name": "alice", "avatar_url": None, "user_id": alice}],
+ )
+
def test_private_room(self) -> None:
"""
A user can be searched for only by people that are either in a public
--
cgit 1.5.1
From 7301019d48f1a4ca7683b1745be55cecc6fe4be3 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 7 Oct 2021 09:38:31 -0400
Subject: Ensure each cache config test uses separate state. (#11019)
Hopefully this fixes these tests sometimes failing in CI.
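As a sketch of the isolation pattern (the `CACHES` global below is a stand-in
for `synapse.config.cache._CACHES`): patching a module-level dict with
`new_callable=dict` hands each test its own fresh mapping, and a class-level
`@patch` wraps every test method, which is why each test gains an extra
argument.

```python
from unittest import TestCase
from unittest.mock import patch

CACHES: dict = {}  # stand-in for the real module-level cache registry


@patch(f"{__name__}.CACHES", new_callable=dict)
class ExampleTests(TestCase):
    def test_starts_empty(self, caches: dict) -> None:
        # Each test receives a brand-new dict, so no state leaks between tests.
        self.assertEqual(caches, {})
        caches["foo"] = 1  # discarded when the patch is undone
```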
---
changelog.d/11019.misc | 1 +
tests/config/test_cache.py | 20 ++++++++++++--------
2 files changed, 13 insertions(+), 8 deletions(-)
create mode 100644 changelog.d/11019.misc
diff --git a/changelog.d/11019.misc b/changelog.d/11019.misc
new file mode 100644
index 0000000000..aae5ee62b2
--- /dev/null
+++ b/changelog.d/11019.misc
@@ -0,0 +1 @@
+Ensure that cache config tests do not share state.
diff --git a/tests/config/test_cache.py b/tests/config/test_cache.py
index f518abdb7a..79d417568d 100644
--- a/tests/config/test_cache.py
+++ b/tests/config/test_cache.py
@@ -12,12 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from unittest.mock import patch
+
from synapse.config.cache import CacheConfig, add_resizable_cache
from synapse.util.caches.lrucache import LruCache
from tests.unittest import TestCase
+# Patch the global _CACHES so that each test runs against its own state.
+@patch("synapse.config.cache._CACHES", new_callable=dict)
class CacheConfigTests(TestCase):
def setUp(self):
# Reset caches before each test
@@ -26,7 +30,7 @@ class CacheConfigTests(TestCase):
def tearDown(self):
self.config.reset()
- def test_individual_caches_from_environ(self):
+ def test_individual_caches_from_environ(self, _caches):
"""
Individual cache factors will be loaded from the environment.
"""
@@ -39,7 +43,7 @@ class CacheConfigTests(TestCase):
self.assertEqual(dict(self.config.cache_factors), {"something_or_other": 2.0})
- def test_config_overrides_environ(self):
+ def test_config_overrides_environ(self, _caches):
"""
Individual cache factors defined in the environment will take precedence
over those in the config.
@@ -56,7 +60,7 @@ class CacheConfigTests(TestCase):
{"foo": 1.0, "bar": 3.0, "something_or_other": 2.0},
)
- def test_individual_instantiated_before_config_load(self):
+ def test_individual_instantiated_before_config_load(self, _caches):
"""
If a cache is instantiated before the config is read, it will be given
the default cache size in the interim, and then resized once the config
@@ -72,7 +76,7 @@ class CacheConfigTests(TestCase):
self.assertEqual(cache.max_size, 300)
- def test_individual_instantiated_after_config_load(self):
+ def test_individual_instantiated_after_config_load(self, _caches):
"""
If a cache is instantiated after the config is read, it will be
immediately resized to the correct size given the per_cache_factor if
@@ -85,7 +89,7 @@ class CacheConfigTests(TestCase):
add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor)
self.assertEqual(cache.max_size, 200)
- def test_global_instantiated_before_config_load(self):
+ def test_global_instantiated_before_config_load(self, _caches):
"""
If a cache is instantiated before the config is read, it will be given
the default cache size in the interim, and then resized to the new
@@ -100,7 +104,7 @@ class CacheConfigTests(TestCase):
self.assertEqual(cache.max_size, 400)
- def test_global_instantiated_after_config_load(self):
+ def test_global_instantiated_after_config_load(self, _caches):
"""
If a cache is instantiated after the config is read, it will be
immediately resized to the correct size given the global factor if there
@@ -113,7 +117,7 @@ class CacheConfigTests(TestCase):
add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor)
self.assertEqual(cache.max_size, 150)
- def test_cache_with_asterisk_in_name(self):
+ def test_cache_with_asterisk_in_name(self, _caches):
"""Some caches have asterisks in their name, test that they are set correctly."""
config = {
@@ -139,7 +143,7 @@ class CacheConfigTests(TestCase):
add_resizable_cache("*cache_c*", cache_resize_callback=cache_c.set_cache_factor)
self.assertEqual(cache_c.max_size, 200)
- def test_apply_cache_factor_from_config(self):
+ def test_apply_cache_factor_from_config(self, _caches):
"""Caches can disable applying cache factor updates, mainly used by
event cache size.
"""
--
cgit 1.5.1
From e79ee48313404abf8fbb7c88361e4ab1efa29a81 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 7 Oct 2021 19:55:15 +0100
Subject: disallow-untyped-defs for synapse.server_notices (#11021)
---
changelog.d/11021.misc | 1 +
mypy.ini | 3 +++
synapse/server_notices/server_notices_manager.py | 8 ++------
3 files changed, 6 insertions(+), 6 deletions(-)
create mode 100644 changelog.d/11021.misc
diff --git a/changelog.d/11021.misc b/changelog.d/11021.misc
new file mode 100644
index 0000000000..8ac1bfcf22
--- /dev/null
+++ b/changelog.d/11021.misc
@@ -0,0 +1 @@
+Add additional type hints to `synapse.server_notices`.
\ No newline at end of file
diff --git a/mypy.ini b/mypy.ini
index a052d49c71..68437e5ce1 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -99,6 +99,9 @@ disallow_untyped_defs = True
[mypy-synapse.rest.*]
disallow_untyped_defs = True
+[mypy-synapse.server_notices.*]
+disallow_untyped_defs = True
+
[mypy-synapse.state.*]
disallow_untyped_defs = True
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index cd1c5ff6f4..0cf60236f8 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -41,12 +41,8 @@ class ServerNoticesManager:
self._notifier = hs.get_notifier()
self.server_notices_mxid = self._config.servernotices.server_notices_mxid
- def is_enabled(self):
- """Checks if server notices are enabled on this server.
-
- Returns:
- bool
- """
+ def is_enabled(self) -> bool:
+ """Checks if server notices are enabled on this server."""
return self.server_notices_mxid is not None
async def send_notice(
--
cgit 1.5.1
From 0b4d5ce5e34ab46b5b55976bfdd0d1d0b105cf13 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 8 Oct 2021 10:05:48 +0100
Subject: Fix CI to run the unit tests without optional deps (#11017)
This also turns off calculating code coverage, as we didn't use it and it generated a lot of noise.
---
.github/workflows/tests.yml | 9 ++++++---
changelog.d/11017.misc | 1 +
2 files changed, 7 insertions(+), 3 deletions(-)
create mode 100644 changelog.d/11017.misc
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 96c39dd9a4..30a911fdbd 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -78,20 +78,23 @@ jobs:
matrix:
python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"]
database: ["sqlite"]
+ toxenv: ["py"]
include:
# Newest Python without optional deps
- python-version: "3.10"
- toxenv: "py-noextras,combine"
+ toxenv: "py-noextras"
# Oldest Python with PostgreSQL
- python-version: "3.6"
database: "postgres"
postgres-version: "9.6"
+ toxenv: "py"
# Newest Python with newest PostgreSQL
- python-version: "3.10"
database: "postgres"
postgres-version: "14"
+ toxenv: "py"
steps:
- uses: actions/checkout@v2
@@ -111,7 +114,7 @@ jobs:
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- - run: tox -e py,combine
+ - run: tox -e ${{ matrix.toxenv }}
env:
TRIAL_FLAGS: "--jobs=2"
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
@@ -169,7 +172,7 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
- run: pip install tox
- - run: tox -e py,combine
+ - run: tox -e py
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
diff --git a/changelog.d/11017.misc b/changelog.d/11017.misc
new file mode 100644
index 0000000000..f05530ac94
--- /dev/null
+++ b/changelog.d/11017.misc
@@ -0,0 +1 @@
+Fix CI to run the unit tests without optional deps.
--
cgit 1.5.1
From bb228f35237879b0cae93e3b5efab468b94a1e5b Mon Sep 17 00:00:00 2001
From: Nick Barrett
Date: Fri, 8 Oct 2021 12:08:25 +0100
Subject: Include exception in json logging (#11028)
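As a sketch of the effect (mirroring the new unit test below), `logger.exception()`
output now gains `exc_type` and `exc_value` keys:

```python
import io
import logging

from synapse.logging._terse_json import JsonFormatter

output = io.StringIO()
handler = logging.StreamHandler(output)
handler.setFormatter(JsonFormatter())
logger = logging.getLogger("example")
logger.addHandler(handler)
logger.setLevel(logging.INFO)

try:
    raise ValueError("That's wrong, you wally!")
except ValueError:
    logger.exception("Hello there, %s!", "wally")

# The JSON line now includes: "exc_type": "ValueError",
# "exc_value": "That's wrong, you wally!"
print(output.getvalue())
```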
---
changelog.d/11028.feature | 1 +
synapse/logging/_terse_json.py | 6 ++++++
tests/logging/test_terse_json.py | 28 ++++++++++++++++++++++++++++
3 files changed, 35 insertions(+)
create mode 100644 changelog.d/11028.feature
diff --git a/changelog.d/11028.feature b/changelog.d/11028.feature
new file mode 100644
index 0000000000..48798356b7
--- /dev/null
+++ b/changelog.d/11028.feature
@@ -0,0 +1 @@
+Include exception information in JSON logging output. Contributed by @Fizzadar at Beeper.
diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py
index 6e82f7c7f1..b78d6e17c9 100644
--- a/synapse/logging/_terse_json.py
+++ b/synapse/logging/_terse_json.py
@@ -65,6 +65,12 @@ class JsonFormatter(logging.Formatter):
if key not in _IGNORED_LOG_RECORD_ATTRIBUTES:
event[key] = value
+ if record.exc_info:
+ exc_type, exc_value, _ = record.exc_info
+ if exc_type:
+ event["exc_type"] = f"{exc_type.__name__}"
+ event["exc_value"] = f"{exc_value}"
+
return _encoder.encode(event)
diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py
index f73fcd684e..96f399b7ab 100644
--- a/tests/logging/test_terse_json.py
+++ b/tests/logging/test_terse_json.py
@@ -198,3 +198,31 @@ class TerseJsonTestCase(LoggerCleanupMixin, TestCase):
self.assertEqual(log["url"], "/_matrix/client/versions")
self.assertEqual(log["protocol"], "1.1")
self.assertEqual(log["user_agent"], "")
+
+ def test_with_exception(self):
+ """
+ The logging exception type & value should be added to the JSON response.
+ """
+ handler = logging.StreamHandler(self.output)
+ handler.setFormatter(JsonFormatter())
+ logger = self.get_logger(handler)
+
+ try:
+ raise ValueError("That's wrong, you wally!")
+ except ValueError:
+ logger.exception("Hello there, %s!", "wally")
+
+ log = self.get_log_line()
+
+ # The terse logger should give us these keys.
+ expected_log_keys = [
+ "log",
+ "level",
+ "namespace",
+ "exc_type",
+ "exc_value",
+ ]
+ self.assertCountEqual(log.keys(), expected_log_keys)
+ self.assertEqual(log["log"], "Hello there, wally!")
+ self.assertEqual(log["exc_type"], "ValueError")
+ self.assertEqual(log["exc_value"], "That's wrong, you wally!")
--
cgit 1.5.1
From 49a683d871add82fb1a8125c6803ac15ec7d341b Mon Sep 17 00:00:00 2001
From: Sean Quah <8349537+squahtx@users.noreply.github.com>
Date: Fri, 8 Oct 2021 12:27:16 +0100
Subject: Fix long-standing bug where `ReadWriteLock` could drop logging
contexts (#10993)
Use `PreserveLoggingContext()` to ensure that logging contexts are not
lost when exiting a read/write lock.
When exiting a read/write lock, callbacks on a `Deferred` are triggered
as a signal to any waiting coroutines. Any waiting coroutine that
becomes runnable is likely to follow the Synapse logging context rules
and will restore its own logging context, then either run to completion
or await another `Deferred`, resetting the logging context in the
process.
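A sketch of the pattern this patch applies, with `new_defer` standing in for
the lock's internal `Deferred`:

```python
from twisted.internet import defer

from synapse.logging.context import PreserveLoggingContext

new_defer: defer.Deferred = defer.Deferred()

# ... waiters have awaited the deferred via make_deferred_yieldable() ...

with PreserveLoggingContext():
    # The callbacks run under the sentinel context, so whatever context a
    # woken coroutine restores (and later resets) cannot clobber or leak
    # into the caller's logging context.
    new_defer.callback(None)
```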
---
changelog.d/10993.misc | 1 +
synapse/util/async_helpers.py | 6 ++++--
2 files changed, 5 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/10993.misc
diff --git a/changelog.d/10993.misc b/changelog.d/10993.misc
new file mode 100644
index 0000000000..23c73dbac5
--- /dev/null
+++ b/changelog.d/10993.misc
@@ -0,0 +1 @@
+Fix a long-standing bug where `ReadWriteLock`s could drop logging contexts on exit.
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 82d918a05f..5df80ea8e7 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -438,7 +438,8 @@ class ReadWriteLock:
try:
yield
finally:
- new_defer.callback(None)
+ with PreserveLoggingContext():
+ new_defer.callback(None)
self.key_to_current_readers.get(key, set()).discard(new_defer)
return _ctx_manager()
@@ -466,7 +467,8 @@ class ReadWriteLock:
try:
yield
finally:
- new_defer.callback(None)
+ with PreserveLoggingContext():
+ new_defer.callback(None)
if self.key_to_current_writer[key] == new_defer:
self.key_to_current_writer.pop(key)
--
cgit 1.5.1
From eb9ddc8c2e807e691fd1820f88f7c0bf43822661 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 8 Oct 2021 07:44:43 -0400
Subject: Remove the deprecated BaseHandler. (#11005)
The shared `ratelimit` function was replaced with a dedicated
`RequestRatelimiter` class (accessible from the `HomeServer`
object).
Other properties were copied to each sub-class that inherited
from `BaseHandler`.
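A hypothetical handler under the new pattern (a sketch; `MyHandler` and
`do_redact` are illustrative names, and `HomeServer` is imported under
TYPE_CHECKING as in the diffs below):

    from synapse.types import Requester

    class MyHandler:
        def __init__(self, hs: "HomeServer"):
            # Take only what this handler actually needs, rather than
            # inheriting a grab-bag of fields from BaseHandler.
            self.store = hs.get_datastore()
            self.clock = hs.get_clock()
            self.request_ratelimiter = hs.get_request_ratelimiter()

        async def do_redact(self, requester: Requester) -> None:
            # Raises LimitExceededError if the requester is over the
            # configured rate limits.
            await self.request_ratelimiter.ratelimit(
                requester, is_admin_redaction=True
            )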
---
changelog.d/11005.misc | 1 +
synapse/api/ratelimiting.py | 86 +++++++++++++++++++++++
synapse/handlers/_base.py | 120 ---------------------------------
synapse/handlers/admin.py | 7 +-
synapse/handlers/auth.py | 8 +--
synapse/handlers/deactivate_account.py | 6 +-
synapse/handlers/device.py | 10 +--
synapse/handlers/directory.py | 9 ++-
synapse/handlers/events.py | 12 ++--
synapse/handlers/federation.py | 6 +-
synapse/handlers/identity.py | 7 +-
synapse/handlers/initial_sync.py | 8 +--
synapse/handlers/message.py | 7 +-
synapse/handlers/profile.py | 11 +--
synapse/handlers/read_marker.py | 5 +-
synapse/handlers/receipts.py | 6 +-
synapse/handlers/register.py | 9 ++-
synapse/handlers/room.py | 15 +++--
synapse/handlers/room_list.py | 7 +-
synapse/handlers/room_member.py | 8 +--
synapse/handlers/saml.py | 7 +-
synapse/handlers/search.py | 9 +--
synapse/handlers/set_password.py | 6 +-
synapse/server.py | 11 ++-
24 files changed, 166 insertions(+), 215 deletions(-)
create mode 100644 changelog.d/11005.misc
delete mode 100644 synapse/handlers/_base.py
diff --git a/changelog.d/11005.misc b/changelog.d/11005.misc
new file mode 100644
index 0000000000..a893591971
--- /dev/null
+++ b/changelog.d/11005.misc
@@ -0,0 +1 @@
+Remove the deprecated `BaseHandler` object.
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index cbdd74025b..e8964097d3 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -17,6 +17,7 @@ from collections import OrderedDict
from typing import Hashable, Optional, Tuple
from synapse.api.errors import LimitExceededError
+from synapse.config.ratelimiting import RateLimitConfig
from synapse.storage.databases.main import DataStore
from synapse.types import Requester
from synapse.util import Clock
@@ -233,3 +234,88 @@ class Ratelimiter:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now_s))
)
+
+
+class RequestRatelimiter:
+ def __init__(
+ self,
+ store: DataStore,
+ clock: Clock,
+ rc_message: RateLimitConfig,
+ rc_admin_redaction: Optional[RateLimitConfig],
+ ):
+ self.store = store
+ self.clock = clock
+
+ # The rate_hz and burst_count are overridden on a per-user basis
+ self.request_ratelimiter = Ratelimiter(
+ store=self.store, clock=self.clock, rate_hz=0, burst_count=0
+ )
+ self._rc_message = rc_message
+
+ # Check whether ratelimiting room admin message redaction is enabled
+ # by the presence of rate limits in the config
+ if rc_admin_redaction:
+ self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter(
+ store=self.store,
+ clock=self.clock,
+ rate_hz=rc_admin_redaction.per_second,
+ burst_count=rc_admin_redaction.burst_count,
+ )
+ else:
+ self.admin_redaction_ratelimiter = None
+
+ async def ratelimit(
+ self,
+ requester: Requester,
+ update: bool = True,
+ is_admin_redaction: bool = False,
+ ) -> None:
+ """Ratelimits requests.
+
+ Args:
+ requester
+ update: Whether to record that a request is being processed.
+ Set to False when doing multiple checks for one request (e.g.
+ to check up front if we would reject the request), and set to
+ True for the last call for a given request.
+ is_admin_redaction: Whether this is a room admin/moderator
+ redacting an event. If so then we may apply different
+ ratelimits depending on config.
+
+ Raises:
+ LimitExceededError if the request should be ratelimited
+ """
+ user_id = requester.user.to_string()
+
+ # The AS user itself is never rate limited.
+ app_service = self.store.get_app_service_by_user_id(user_id)
+ if app_service is not None:
+ return # do not ratelimit app service senders
+
+ messages_per_second = self._rc_message.per_second
+ burst_count = self._rc_message.burst_count
+
+ # Check if there is a per user override in the DB.
+ override = await self.store.get_ratelimit_for_user(user_id)
+ if override:
+ # If overridden with a null Hz then ratelimiting has been entirely
+ # disabled for the user
+ if not override.messages_per_second:
+ return
+
+ messages_per_second = override.messages_per_second
+ burst_count = override.burst_count
+
+ if is_admin_redaction and self.admin_redaction_ratelimiter:
+ # If we have separate config for admin redactions, use a separate
+ # ratelimiter as to not have user_ids clash
+ await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
+ else:
+ # Override rate and burst count per-user
+ await self.request_ratelimiter.ratelimit(
+ requester,
+ rate_hz=messages_per_second,
+ burst_count=burst_count,
+ update=update,
+ )
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
deleted file mode 100644
index 0ccef884e7..0000000000
--- a/synapse/handlers/_base.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright 2014 - 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import TYPE_CHECKING, Optional
-
-from synapse.api.ratelimiting import Ratelimiter
-from synapse.types import Requester
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-class BaseHandler:
- """
- Common base class for the event handlers.
-
- Deprecated: new code should not use this. Instead, Handler classes should define the
- fields they actually need. The utility methods should either be factored out to
- standalone helper functions, or to different Handler classes.
- """
-
- def __init__(self, hs: "HomeServer"):
- self.store = hs.get_datastore()
- self.auth = hs.get_auth()
- self.notifier = hs.get_notifier()
- self.state_handler = hs.get_state_handler()
- self.distributor = hs.get_distributor()
- self.clock = hs.get_clock()
- self.hs = hs
-
- # The rate_hz and burst_count are overridden on a per-user basis
- self.request_ratelimiter = Ratelimiter(
- store=self.store, clock=self.clock, rate_hz=0, burst_count=0
- )
- self._rc_message = self.hs.config.ratelimiting.rc_message
-
- # Check whether ratelimiting room admin message redaction is enabled
- # by the presence of rate limits in the config
- if self.hs.config.ratelimiting.rc_admin_redaction:
- self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter(
- store=self.store,
- clock=self.clock,
- rate_hz=self.hs.config.ratelimiting.rc_admin_redaction.per_second,
- burst_count=self.hs.config.ratelimiting.rc_admin_redaction.burst_count,
- )
- else:
- self.admin_redaction_ratelimiter = None
-
- self.server_name = hs.hostname
-
- self.event_builder_factory = hs.get_event_builder_factory()
-
- async def ratelimit(
- self,
- requester: Requester,
- update: bool = True,
- is_admin_redaction: bool = False,
- ) -> None:
- """Ratelimits requests.
-
- Args:
- requester
- update: Whether to record that a request is being processed.
- Set to False when doing multiple checks for one request (e.g.
- to check up front if we would reject the request), and set to
- True for the last call for a given request.
- is_admin_redaction: Whether this is a room admin/moderator
- redacting an event. If so then we may apply different
- ratelimits depending on config.
-
- Raises:
- LimitExceededError if the request should be ratelimited
- """
- user_id = requester.user.to_string()
-
- # The AS user itself is never rate limited.
- app_service = self.store.get_app_service_by_user_id(user_id)
- if app_service is not None:
- return # do not ratelimit app service senders
-
- messages_per_second = self._rc_message.per_second
- burst_count = self._rc_message.burst_count
-
- # Check if there is a per user override in the DB.
- override = await self.store.get_ratelimit_for_user(user_id)
- if override:
- # If overridden with a null Hz then ratelimiting has been entirely
- # disabled for the user
- if not override.messages_per_second:
- return
-
- messages_per_second = override.messages_per_second
- burst_count = override.burst_count
-
- if is_admin_redaction and self.admin_redaction_ratelimiter:
- # If we have separate config for admin redactions, use a separate
- # ratelimiter as to not have user_ids clash
- await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
- else:
- # Override rate and burst count per-user
- await self.request_ratelimiter.ratelimit(
- requester,
- rate_hz=messages_per_second,
- burst_count=burst_count,
- update=update,
- )
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index bfa7f2c545..a53cd62d3c 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -21,18 +21,15 @@ from synapse.events import EventBase
from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID
from synapse.visibility import filter_events_for_client
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-class AdminHandler(BaseHandler):
+class AdminHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.store = hs.get_datastore()
self.storage = hs.get_storage()
self.state_store = self.storage.state
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 2d0f3d566c..f4612a5b92 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -52,7 +52,6 @@ from synapse.api.errors import (
UserDeactivatedError,
)
from synapse.api.ratelimiting import Ratelimiter
-from synapse.handlers._base import BaseHandler
from synapse.handlers.ui_auth import (
INTERACTIVE_AUTH_CHECKERS,
UIAuthSessionDataConstants,
@@ -186,12 +185,13 @@ class LoginTokenAttributes:
auth_provider_id = attr.ib(type=str)
-class AuthHandler(BaseHandler):
+class AuthHandler:
SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
+ self.clock = hs.get_clock()
self.checkers: Dict[str, UserInteractiveAuthChecker] = {}
for auth_checker_class in INTERACTIVE_AUTH_CHECKERS:
inst = auth_checker_class(hs)
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 12bdca7445..e88c3c27ce 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -19,19 +19,17 @@ from synapse.api.errors import SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import Requester, UserID, create_requester
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-class DeactivateAccountHandler(BaseHandler):
+class DeactivateAccountHandler:
"""Handler which deals with deactivating user accounts."""
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
self.hs = hs
self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler()
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 35334725d7..75e6019760 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -40,8 +40,6 @@ from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import measure_func
from synapse.util.retryutils import NotRetryingDestination
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -50,14 +48,16 @@ logger = logging.getLogger(__name__)
MAX_DEVICE_DISPLAY_NAME_LEN = 100
-class DeviceWorkerHandler(BaseHandler):
+class DeviceWorkerHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.clock = hs.get_clock()
self.hs = hs
+ self.store = hs.get_datastore()
+ self.notifier = hs.get_notifier()
self.state = hs.get_state_handler()
self.state_store = hs.get_storage().state
self._auth_handler = hs.get_auth_handler()
+ self.server_name = hs.hostname
@trace
async def get_devices_by_user(self, user_id: str) -> List[JsonDict]:
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 9078781d5a..14ed7d9879 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -31,18 +31,16 @@ from synapse.appservice import ApplicationService
from synapse.storage.databases.main.directory import RoomAliasMapping
from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-class DirectoryHandler(BaseHandler):
+class DirectoryHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.auth = hs.get_auth()
+ self.hs = hs
self.state = hs.get_state_handler()
self.appservice_handler = hs.get_application_service_handler()
self.event_creation_handler = hs.get_event_creation_handler()
@@ -51,6 +49,7 @@ class DirectoryHandler(BaseHandler):
self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
self.require_membership = hs.config.server.require_membership_for_aliases
self.third_party_event_rules = hs.get_third_party_event_rules()
+ self.server_name = hs.hostname
self.federation = hs.get_federation_client()
hs.get_federation_registry().register_query_handler(
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 4b3f037072..1f64534a8a 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -25,8 +25,6 @@ from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, UserID
from synapse.visibility import filter_events_for_client
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -34,11 +32,11 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class EventStreamHandler(BaseHandler):
+class EventStreamHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.store = hs.get_datastore()
self.clock = hs.get_clock()
+ self.hs = hs
self.notifier = hs.get_notifier()
self.state = hs.get_state_handler()
@@ -138,9 +136,9 @@ class EventStreamHandler(BaseHandler):
return chunk
-class EventHandler(BaseHandler):
+class EventHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
self.storage = hs.get_storage()
async def get_event(
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 043ca4a224..3e341bd287 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -53,7 +53,6 @@ from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.validator import EventValidator
from synapse.federation.federation_client import InvalidResponseError
-from synapse.handlers._base import BaseHandler
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
make_deferred_yieldable,
@@ -78,15 +77,13 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class FederationHandler(BaseHandler):
+class FederationHandler:
"""Handles general incoming federation requests
Incoming events are *not* handled here, for which see FederationEventHandler.
"""
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
self.hs = hs
self.store = hs.get_datastore()
@@ -99,6 +96,7 @@ class FederationHandler(BaseHandler):
self.is_mine_id = hs.is_mine_id
self.spam_checker = hs.get_spam_checker()
self.event_creation_handler = hs.get_event_creation_handler()
+ self.event_builder_factory = hs.get_event_builder_factory()
self._event_auth_handler = hs.get_event_auth_handler()
self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
self.config = hs.config
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index c881475c25..9c319b5383 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -39,8 +39,6 @@ from synapse.util.stringutils import (
valid_id_server_location,
)
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -49,10 +47,9 @@ logger = logging.getLogger(__name__)
id_server_scheme = "https://"
-class IdentityHandler(BaseHandler):
+class IdentityHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.store = hs.get_datastore()
# An HTTP client for contacting trusted URLs.
self.http_client = SimpleHttpClient(hs)
# An HTTP client for contacting identity servers specified by clients.
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 9ad39a65d8..d4e4556155 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -31,8 +31,6 @@ from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
from synapse.visibility import filter_events_for_client
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -40,9 +38,11 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class InitialSyncHandler(BaseHandler):
+class InitialSyncHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
+ self.state_handler = hs.get_state_handler()
self.hs = hs
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index ccd7827207..4de9f4b828 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -62,8 +62,6 @@ from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import measure_func
from synapse.visibility import filter_events_for_client
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.events.third_party_rules import ThirdPartyEventRules
from synapse.server import HomeServer
@@ -433,8 +431,7 @@ class EventCreationHandler:
self.send_event = ReplicationSendEventRestServlet.make_client(hs)
- # This is only used to get at ratelimit function
- self.base_handler = BaseHandler(hs)
+ self.request_ratelimiter = hs.get_request_ratelimiter()
# We arbitrarily limit concurrent event creation for a room to 5.
# This is to stop us from diverging history *too* much.
@@ -1322,7 +1319,7 @@ class EventCreationHandler:
original_event and event.sender != original_event.sender
)
- await self.base_handler.ratelimit(
+ await self.request_ratelimiter.ratelimit(
requester, is_admin_redaction=is_admin_redaction
)
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 2e19706c69..e6c3cf585b 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -32,8 +32,6 @@ from synapse.types import (
get_domain_from_id,
)
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -43,7 +41,7 @@ MAX_DISPLAYNAME_LEN = 256
MAX_AVATAR_URL_LEN = 1000
-class ProfileHandler(BaseHandler):
+class ProfileHandler:
"""Handles fetching and updating user profile information.
ProfileHandler can be instantiated directly on workers and will
@@ -54,7 +52,9 @@ class ProfileHandler(BaseHandler):
PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
+ self.hs = hs
self.federation = hs.get_federation_client()
hs.get_federation_registry().register_query_handler(
@@ -62,6 +62,7 @@ class ProfileHandler(BaseHandler):
)
self.user_directory_handler = hs.get_user_directory_handler()
+ self.request_ratelimiter = hs.get_request_ratelimiter()
if hs.config.worker.run_background_tasks:
self.clock.looping_call(
@@ -346,7 +347,7 @@ class ProfileHandler(BaseHandler):
if not self.hs.is_mine(target_user):
return
- await self.ratelimit(requester)
+ await self.request_ratelimiter.ratelimit(requester)
# Do not actually update the room state for shadow-banned users.
if requester.shadow_banned:
diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py
index bd8160e7ed..58593e570e 100644
--- a/synapse/handlers/read_marker.py
+++ b/synapse/handlers/read_marker.py
@@ -17,17 +17,14 @@ from typing import TYPE_CHECKING
from synapse.util.async_helpers import Linearizer
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-class ReadMarkerHandler(BaseHandler):
+class ReadMarkerHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
self.server_name = hs.config.server.server_name
self.store = hs.get_datastore()
self.account_data_handler = hs.get_account_data_handler()
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index f21f33ada2..374e961e3b 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -16,7 +16,6 @@ from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
from synapse.api.constants import ReadReceiptEventFields
from synapse.appservice import ApplicationService
-from synapse.handlers._base import BaseHandler
from synapse.streams import EventSource
from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id
@@ -26,10 +25,9 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class ReceiptsHandler(BaseHandler):
+class ReceiptsHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.notifier = hs.get_notifier()
self.server_name = hs.config.server.server_name
self.store = hs.get_datastore()
self.event_auth_handler = hs.get_event_auth_handler()
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 441af7a848..a0e6a01775 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -41,8 +41,6 @@ from synapse.spam_checker_api import RegistrationBehaviour
from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, UserID, create_requester
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -85,9 +83,10 @@ class LoginDict(TypedDict):
refresh_token: Optional[str]
-class RegistrationHandler(BaseHandler):
+class RegistrationHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
self.hs = hs
self.auth = hs.get_auth()
self._auth_handler = hs.get_auth_handler()
@@ -515,7 +514,7 @@ class RegistrationHandler(BaseHandler):
# we don't have a local user in the room to craft up an invite with.
requires_invite = await self.store.is_host_joined(
room_id,
- self.server_name,
+ self._server_name,
)
if requires_invite:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index d40dbd761d..7072bca1fc 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -76,8 +76,6 @@ from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import parse_and_validate_server_name
from synapse.visibility import filter_events_for_client
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -88,15 +86,18 @@ id_server_scheme = "https://"
FIVE_MINUTES_IN_MS = 5 * 60 * 1000
-class RoomCreationHandler(BaseHandler):
+class RoomCreationHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
+ self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
+ self.clock = hs.get_clock()
+ self.hs = hs
self.spam_checker = hs.get_spam_checker()
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self._event_auth_handler = hs.get_event_auth_handler()
self.config = hs.config
+ self.request_ratelimiter = hs.get_request_ratelimiter()
# Room state based off defined presets
self._presets_dict: Dict[str, Dict[str, Any]] = {
@@ -162,7 +163,7 @@ class RoomCreationHandler(BaseHandler):
Raises:
ShadowBanError if the requester is shadow-banned.
"""
- await self.ratelimit(requester)
+ await self.request_ratelimiter.ratelimit(requester)
user_id = requester.user.to_string()
@@ -665,7 +666,7 @@ class RoomCreationHandler(BaseHandler):
raise SynapseError(403, "You are not permitted to create rooms")
if ratelimit:
- await self.ratelimit(requester)
+ await self.request_ratelimiter.ratelimit(requester)
room_version_id = config.get(
"room_version", self.config.server.default_room_version.identifier
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index c3d4199ed1..ba7a14d651 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -36,8 +36,6 @@ from synapse.types import JsonDict, ThirdPartyInstanceID
from synapse.util.caches.descriptors import _CacheContext, cached
from synapse.util.caches.response_cache import ResponseCache
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -49,9 +47,10 @@ REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000
EMPTY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)
-class RoomListHandler(BaseHandler):
+class RoomListHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
+ self.hs = hs
self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
self.response_cache: ResponseCache[
Tuple[Optional[int], Optional[str], Optional[ThirdPartyInstanceID]]
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index eef337feeb..74e6c7eca6 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -51,8 +51,6 @@ from synapse.types import (
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_left_room
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -118,9 +116,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count,
)
- # This is only used to get at the ratelimit function. It's fine there are
- # multiple of these as it doesn't store state.
- self.base_handler = BaseHandler(hs)
+ self.request_ratelimiter = hs.get_request_ratelimiter()
@abc.abstractmethod
async def _remote_join(
@@ -1275,7 +1271,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# We need to rate limit *before* we send out any 3PID invites, so we
# can't just rely on the standard ratelimiting of events.
- await self.base_handler.ratelimit(requester)
+ await self.request_ratelimiter.ratelimit(requester)
can_invite = await self.third_party_event_rules.check_threepid_can_be_invited(
medium, address, room_id
diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py
index 2fed9f377a..727d75a50c 100644
--- a/synapse/handlers/saml.py
+++ b/synapse/handlers/saml.py
@@ -22,7 +22,6 @@ from saml2.client import Saml2Client
from synapse.api.errors import SynapseError
from synapse.config import ConfigError
-from synapse.handlers._base import BaseHandler
from synapse.handlers.sso import MappingException, UserAttributes
from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
@@ -51,9 +50,11 @@ class Saml2SessionData:
ui_auth_session_id: Optional[str] = None
-class SamlHandler(BaseHandler):
+class SamlHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
+ self.server_name = hs.hostname
self._saml_client = Saml2Client(hs.config.saml2.saml2_sp_config)
self._saml_idp_entityid = hs.config.saml2.saml2_idp_entityid
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 6d3333ee00..a3ffa26be8 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -26,17 +26,18 @@ from synapse.storage.state import StateFilter
from synapse.types import JsonDict, UserID
from synapse.visibility import filter_events_for_client
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-class SearchHandler(BaseHandler):
+class SearchHandler:
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
+ self.state_handler = hs.get_state_handler()
+ self.clock = hs.get_clock()
+ self.hs = hs
self._event_serializer = hs.get_event_client_serializer()
self.storage = hs.get_storage()
self.state_store = self.storage.state
diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
index a63fac8283..706ad72761 100644
--- a/synapse/handlers/set_password.py
+++ b/synapse/handlers/set_password.py
@@ -17,19 +17,17 @@ from typing import TYPE_CHECKING, Optional
from synapse.api.errors import Codes, StoreError, SynapseError
from synapse.types import Requester
-from ._base import BaseHandler
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-class SetPasswordHandler(BaseHandler):
+class SetPasswordHandler:
"""Handler which deals with changing user account passwords"""
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self.store = hs.get_datastore()
self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler()
diff --git a/synapse/server.py b/synapse/server.py
index 637eb15b78..0783df41d4 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -39,7 +39,7 @@ from twisted.web.resource import IResource
from synapse.api.auth import Auth
from synapse.api.filtering import Filtering
-from synapse.api.ratelimiting import Ratelimiter
+from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter
from synapse.appservice.api import ApplicationServiceApi
from synapse.appservice.scheduler import ApplicationServiceScheduler
from synapse.config.homeserver import HomeServerConfig
@@ -816,3 +816,12 @@ class HomeServer(metaclass=abc.ABCMeta):
def should_send_federation(self) -> bool:
"Should this server be sending federation traffic directly?"
return self.config.worker.send_federation
+
+ @cache_in_self
+ def get_request_ratelimiter(self) -> RequestRatelimiter:
+ return RequestRatelimiter(
+ self.get_datastore(),
+ self.get_clock(),
+ self.config.ratelimiting.rc_message,
+ self.config.ratelimiting.rc_admin_redaction,
+ )
--
cgit 1.5.1
From 670a8d9a1e18159917ca1b4f8e5af48a0b258f5e Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Fri, 8 Oct 2021 12:52:48 +0100
Subject: Fix overwriting profile when making room public (#11003)
This splits apart `handle_new_user` into a function which adds an entry
to the `user_directory` and a function which updates the room sharing
tables. I plan to continue doing more of this kind of refactoring to
clarify the implementation.
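In sketch form (names are taken from the diff below; the surrounding
control flow is simplified), a join is now handled in two steps:

    if change is MatchChange.now_true:  # the user joined
        if is_remote:
            # Remote users may be unknown to us, so make sure they have
            # a user_directory entry first. (Local users get theirs at
            # registration time.)
            await self._upsert_directory_entry_for_remote_user(
                state_key, event_id
            )
        # Everyone who joins is tracked in the room-sharing tables.
        await self._track_user_joined_room(room_id, state_key)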
---
changelog.d/11003.bugfix | 1 +
synapse/handlers/user_directory.py | 63 +++++++++++++++++--------------
tests/handlers/test_user_directory.py | 71 ++++++++++++++++++++++++++++++++++-
3 files changed, 104 insertions(+), 31 deletions(-)
create mode 100644 changelog.d/11003.bugfix
diff --git a/changelog.d/11003.bugfix b/changelog.d/11003.bugfix
new file mode 100644
index 0000000000..0786f1b886
--- /dev/null
+++ b/changelog.d/11003.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where a user's per-room nickname/avatar would overwrite their profile in the user directory when a room was made public.
\ No newline at end of file
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index b7b1973346..8810f048ba 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -242,18 +242,15 @@ class UserDirectoryHandler(StateDeltasHandler):
continue
if change is MatchChange.now_true: # The user joined
- event = await self.store.get_event(event_id, allow_none=True)
- # It isn't expected for this event to not exist, but we
- # don't want the entire background process to break.
- if event is None:
- continue
-
- profile = ProfileInfo(
- avatar_url=event.content.get("avatar_url"),
- display_name=event.content.get("displayname"),
- )
-
- await self._handle_new_user(room_id, state_key, profile)
+ # This may be the first time we've seen a remote user. If
+ # so, ensure we have a directory entry for them. (We don't
+ # need to do this for local users: their directory entry
+ # is created at the point of registration.)
+ if is_remote:
+ await self._upsert_directory_entry_for_remote_user(
+ state_key, event_id
+ )
+ await self._track_user_joined_room(room_id, state_key)
else: # The user left
await self._handle_remove_user(room_id, state_key)
else:
@@ -303,7 +300,7 @@ class UserDirectoryHandler(StateDeltasHandler):
room_id
)
- logger.debug("Change: %r, publicness: %r", publicness, is_public)
+ logger.debug("Publicness change: %r, is_public: %r", publicness, is_public)
if publicness is MatchChange.now_true and not is_public:
# If we became world readable but room isn't currently public then
@@ -314,42 +311,50 @@ class UserDirectoryHandler(StateDeltasHandler):
# ignore the change
return
- other_users_in_room_with_profiles = (
- await self.store.get_users_in_room_with_profiles(room_id)
- )
+ users_in_room = await self.store.get_users_in_room(room_id)
# Remove every user from the sharing tables for that room.
- for user_id in other_users_in_room_with_profiles.keys():
+ for user_id in users_in_room:
await self.store.remove_user_who_share_room(user_id, room_id)
# Then, re-add them to the tables.
- # NOTE: this is not the most efficient method, as handle_new_user sets
+ # NOTE: this is not the most efficient method, as _track_user_joined_room sets
# up local_user -> other_user and other_user_whos_local -> local_user,
# which when ran over an entire room, will result in the same values
# being added multiple times. The batching upserts shouldn't make this
# too bad, though.
- for user_id, profile in other_users_in_room_with_profiles.items():
- await self._handle_new_user(room_id, user_id, profile)
+ for user_id in users_in_room:
+ await self._track_user_joined_room(room_id, user_id)
- async def _handle_new_user(
- self, room_id: str, user_id: str, profile: ProfileInfo
+ async def _upsert_directory_entry_for_remote_user(
+ self, user_id: str, event_id: str
) -> None:
- """Called when we might need to add user to directory
-
- Args:
- room_id: The room ID that user joined or started being public
- user_id
+ """A remote user has just joined a room. Ensure they have an entry in
+ the user directory. The caller is responsible for making sure they're
+ remote.
"""
+ event = await self.store.get_event(event_id, allow_none=True)
+ # We don't expect this event to be missing, but we don't
+ # want the entire background process to break if it is.
+ if event is None:
+ return
+
logger.debug("Adding new user to dir, %r", user_id)
await self.store.update_profile_in_user_dir(
- user_id, profile.display_name, profile.avatar_url
+ user_id, event.content.get("displayname"), event.content.get("avatar_url")
)
+ async def _track_user_joined_room(self, room_id: str, user_id: str) -> None:
+ """Someone's just joined a room. Update `users_in_public_rooms` or
+ `users_who_share_private_rooms` as appropriate.
+
+ The caller is responsible for ensuring that the given user is not excluded
+ from the user directory.
+ """
is_public = await self.store.is_room_world_readable_or_publicly_joinable(
room_id
)
- # Now we update users who share rooms with users.
other_users_in_room = await self.store.get_users_in_room(room_id)
if is_public:
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 47217f0542..db65253773 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -372,8 +372,6 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
# Alice makes two rooms. Bob joins one of them.
room1 = self.helper.create_room_as(alice, tok=alice_token)
room2 = self.helper.create_room_as(alice, tok=alice_token)
- print("room1=", room1)
- print("room2=", room2)
self.helper.join(room1, bob, tok=bob_token)
# The user sharing tables should have been updated.
@@ -436,6 +434,75 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
0,
)
+ def test_making_room_public_doesnt_alter_directory_entry(self) -> None:
+ """Per-room names shouldn't go to the directory when the room becomes public.
+
+ This isn't about preventing a leak (the room is now public, so the nickname
+ is too). It's about preserving the invariant that we only show a user's public
+ profile in the user directory results.
+
+ I made this a Synapse test case rather than a Complement one because
+ I think this is (strictly speaking) an implementation choice. Synapse
+ has chosen to only ever use the public profile when responding to a user
+ directory search. There's no privacy leak here, because making the room
+ public discloses the per-room name.
+
+ The spec doesn't mandate anything about _how_ a user
+ should appear in a /user_directory/search result. Hypothetical example:
+ suppose Bob searches for Alice. When representing Alice in a search
+ result, it's reasonable to use any of Alice's nicknames that Bob is
+ aware of. Heck, maybe we even want to use lots of them in a combined
+ displayname like `Alice (aka "ali", "ally", "41iC3")`.
+ """
+
+ # TODO the same should apply when Alice is a remote user.
+ alice = self.register_user("alice", "pass")
+ alice_token = self.login(alice, "pass")
+ bob = self.register_user("bob", "pass")
+ bob_token = self.login(bob, "pass")
+
+ # Alice and Bob are in a private room.
+ room = self.helper.create_room_as(alice, is_public=False, tok=alice_token)
+ self.helper.invite(room, src=alice, targ=bob, tok=alice_token)
+ self.helper.join(room, user=bob, tok=bob_token)
+
+ # Alice has a nickname unique to that room.
+
+ self.helper.send_state(
+ room,
+ "m.room.member",
+ {
+ "displayname": "Freddy Mercury",
+ "membership": "join",
+ },
+ alice_token,
+ state_key=alice,
+ )
+
+ # Check Alice isn't recorded as being in a public room.
+ public = self.get_success(self.user_dir_helper.get_users_in_public_rooms())
+ self.assertNotIn((alice, room), public)
+
+ # One of them makes the room public.
+ self.helper.send_state(
+ room,
+ "m.room.join_rules",
+ {"join_rule": "public"},
+ alice_token,
+ )
+
+ # Check that Alice is now recorded as being in a public room
+ public = self.get_success(self.user_dir_helper.get_users_in_public_rooms())
+ self.assertIn((alice, room), public)
+
+ # Alice's display name remains the same in the user directory.
+ search_result = self.get_success(self.handler.search_users(bob, alice, 10))
+ self.assertEqual(
+ search_result["results"],
+ [{"display_name": "alice", "avatar_url": None, "user_id": alice}],
+ )
+
def test_private_room(self) -> None:
"""
A user can be searched for only by people that are either in a public
--
cgit 1.5.1
From 797ee7812db28f6cf130d68e2d10911c826b0be5 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Fri, 8 Oct 2021 14:49:41 +0100
Subject: Relax `ignore-missing-imports` for modules that have stubs now and
update mypy (#11006)
Updating mypy past version 0.900 means that third-party stubs are no
longer distributed with typeshed. See
http://mypy-lang.blogspot.com/2021/06/mypy-0900-released.html for details.
We therefore pull in the stub packages as dev dependencies in setup.py.
Additionally, some modules whose import failures we previously ignored
now have stubs, so let's use them.
The rest of this change consists of fixups to make the newer mypy and
stubs pass CI.
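The new convention, sketched for a hypothetical dependency `foo`: if a
`types-foo` stub package exists on PyPI, pull it in as a dev dependency
rather than silencing mypy, and only keep an `ignore_missing_imports`
stanza in mypy.ini for packages with no stubs at all:

    # setup.py (sketch; "types-foo" is an illustrative package name)
    CONDITIONAL_REQUIREMENTS["mypy"] = [
        "mypy==0.910",
        "mypy-zope==0.3.2",
        "types-foo>=1.0",
    ]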
Co-authored-by: Patrick Cloke
---
changelog.d/11006.misc | 1 +
mypy.ini | 69 +++++++++++++--------------
setup.py | 11 ++++-
synapse/config/tls.py | 9 ++--
synapse/http/client.py | 2 +-
synapse/logging/context.py | 16 +++----
synapse/metrics/background_process_metrics.py | 2 +-
synapse/push/mailer.py | 2 +-
synapse/rest/media/v1/__init__.py | 38 +++++----------
synapse/rest/media/v1/thumbnailer.py | 21 ++++++--
synapse/storage/prepare_database.py | 4 ++
synapse/util/__init__.py | 5 +-
12 files changed, 100 insertions(+), 80 deletions(-)
create mode 100644 changelog.d/11006.misc
diff --git a/changelog.d/11006.misc b/changelog.d/11006.misc
new file mode 100644
index 0000000000..7b4abae76a
--- /dev/null
+++ b/changelog.d/11006.misc
@@ -0,0 +1 @@
+Bump mypy version for CI to 0.910, and pull in new type stubs for dependencies.
\ No newline at end of file
diff --git a/mypy.ini b/mypy.ini
index 68437e5ce1..e7cb80b6eb 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -198,98 +198,97 @@ disallow_untyped_defs = True
[mypy-tests.storage.test_user_directory]
disallow_untyped_defs = True
-[mypy-pymacaroons.*]
-ignore_missing_imports = True
+;; Dependencies without annotations
+;; Before ignoring a module, check to see if type stubs are available.
+;; The `typeshed` project maintains stubs here:
+;; https://github.com/python/typeshed/tree/master/stubs
+;; and for each package `foo` there's a corresponding `types-foo` package on PyPI,
+;; which we can pull in as a dev dependency by adding to `setup.py`'s
+;; `CONDITIONAL_REQUIREMENTS["mypy"]` list.
-[mypy-zope]
+[mypy-authlib.*]
ignore_missing_imports = True
[mypy-bcrypt]
ignore_missing_imports = True
-[mypy-constantly]
-ignore_missing_imports = True
-
-[mypy-twisted.*]
+[mypy-canonicaljson]
ignore_missing_imports = True
-[mypy-treq.*]
+[mypy-constantly]
ignore_missing_imports = True
-[mypy-hyperlink]
+[mypy-daemonize]
ignore_missing_imports = True
[mypy-h11]
ignore_missing_imports = True
-[mypy-msgpack]
-ignore_missing_imports = True
-
-[mypy-opentracing]
+[mypy-hiredis]
ignore_missing_imports = True
-[mypy-OpenSSL.*]
+[mypy-hyperlink]
ignore_missing_imports = True
-[mypy-netaddr]
+[mypy-ijson.*]
ignore_missing_imports = True
-[mypy-saml2.*]
+[mypy-jaeger_client.*]
ignore_missing_imports = True
-[mypy-canonicaljson]
+[mypy-josepy.*]
ignore_missing_imports = True
-[mypy-jaeger_client.*]
+[mypy-jwt.*]
ignore_missing_imports = True
-[mypy-jsonschema]
+[mypy-lxml]
ignore_missing_imports = True
-[mypy-signedjson.*]
+[mypy-msgpack]
ignore_missing_imports = True
-[mypy-prometheus_client.*]
+[mypy-nacl.*]
ignore_missing_imports = True
-[mypy-service_identity.*]
+[mypy-netaddr]
ignore_missing_imports = True
-[mypy-daemonize]
+[mypy-opentracing]
ignore_missing_imports = True
-[mypy-sentry_sdk]
+[mypy-phonenumbers.*]
ignore_missing_imports = True
-[mypy-PIL.*]
+[mypy-prometheus_client.*]
ignore_missing_imports = True
-[mypy-lxml]
+[mypy-pymacaroons.*]
ignore_missing_imports = True
-[mypy-jwt.*]
+[mypy-pympler.*]
ignore_missing_imports = True
-[mypy-authlib.*]
+[mypy-rust_python_jaeger_reporter.*]
ignore_missing_imports = True
-[mypy-rust_python_jaeger_reporter.*]
+[mypy-saml2.*]
ignore_missing_imports = True
-[mypy-nacl.*]
+[mypy-sentry_sdk]
ignore_missing_imports = True
-[mypy-hiredis]
+[mypy-service_identity.*]
ignore_missing_imports = True
-[mypy-josepy.*]
+[mypy-signedjson.*]
ignore_missing_imports = True
-[mypy-pympler.*]
+[mypy-treq.*]
ignore_missing_imports = True
-[mypy-phonenumbers.*]
+[mypy-twisted.*]
ignore_missing_imports = True
-[mypy-ijson.*]
+[mypy-zope]
ignore_missing_imports = True
diff --git a/setup.py b/setup.py
index c478563510..f8b4487bc1 100755
--- a/setup.py
+++ b/setup.py
@@ -112,7 +112,16 @@ CONDITIONAL_REQUIREMENTS["dev"] = CONDITIONAL_REQUIREMENTS["lint"] + [
"pygithub==1.55",
]
-CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.13"]
+CONDITIONAL_REQUIREMENTS["mypy"] = [
+ "mypy==0.910",
+ "mypy-zope==0.3.2",
+ "types-bleach>=4.1.0",
+ "types-jsonschema>=3.2.0",
+ "types-Pillow>=8.3.4",
+ "types-pyOpenSSL>=20.0.7",
+ "types-PyYAML>=5.4.10",
+ "types-setuptools>=57.4.0",
+]
# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index 5679f05e42..6227434bac 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -172,9 +172,12 @@ class TlsConfig(Config):
)
# YYYYMMDDhhmmssZ -- in UTC
- expires_on = datetime.strptime(
- tls_certificate.get_notAfter().decode("ascii"), "%Y%m%d%H%M%SZ"
- )
+ expiry_data = tls_certificate.get_notAfter()
+ if expiry_data is None:
+ raise ValueError(
+ "TLS Certificate has no expiry date, and this is not permitted"
+ )
+ expires_on = datetime.strptime(expiry_data.decode("ascii"), "%Y%m%d%H%M%SZ")
now = datetime.utcnow()
days_remaining = (expires_on - now).days
return days_remaining
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 5204c3d08c..b5a2d333a6 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -912,7 +912,7 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory):
def __init__(self):
self._context = SSL.Context(SSL.SSLv23_METHOD)
- self._context.set_verify(VERIFY_NONE, lambda *_: None)
+ self._context.set_verify(VERIFY_NONE, lambda *_: False)
def getContext(self, hostname=None, port=None):
return self._context
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 02e5ddd2ef..bdc0187743 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -52,7 +52,7 @@ try:
is_thread_resource_usage_supported = True
- def get_thread_resource_usage() -> "Optional[resource._RUsage]":
+ def get_thread_resource_usage() -> "Optional[resource.struct_rusage]":
return resource.getrusage(RUSAGE_THREAD)
@@ -61,7 +61,7 @@ except Exception:
# won't track resource usage.
is_thread_resource_usage_supported = False
- def get_thread_resource_usage() -> "Optional[resource._RUsage]":
+ def get_thread_resource_usage() -> "Optional[resource.struct_rusage]":
return None
@@ -226,10 +226,10 @@ class _Sentinel:
def copy_to(self, record):
pass
- def start(self, rusage: "Optional[resource._RUsage]"):
+ def start(self, rusage: "Optional[resource.struct_rusage]"):
pass
- def stop(self, rusage: "Optional[resource._RUsage]"):
+ def stop(self, rusage: "Optional[resource.struct_rusage]"):
pass
def add_database_transaction(self, duration_sec):
@@ -289,7 +289,7 @@ class LoggingContext:
# The thread resource usage when the logcontext became active. None
# if the context is not currently active.
- self.usage_start: Optional[resource._RUsage] = None
+ self.usage_start: Optional[resource.struct_rusage] = None
self.main_thread = get_thread_id()
self.request = None
@@ -410,7 +410,7 @@ class LoggingContext:
# we also track the current scope:
record.scope = self.scope
- def start(self, rusage: "Optional[resource._RUsage]") -> None:
+ def start(self, rusage: "Optional[resource.struct_rusage]") -> None:
"""
Record that this logcontext is currently running.
@@ -435,7 +435,7 @@ class LoggingContext:
else:
self.usage_start = rusage
- def stop(self, rusage: "Optional[resource._RUsage]") -> None:
+ def stop(self, rusage: "Optional[resource.struct_rusage]") -> None:
"""
Record that this logcontext is no longer running.
@@ -490,7 +490,7 @@ class LoggingContext:
return res
- def _get_cputime(self, current: "resource._RUsage") -> Tuple[float, float]:
+ def _get_cputime(self, current: "resource.struct_rusage") -> Tuple[float, float]:
"""Get the cpu usage time between start() and the given rusage
Args:
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index 3a14260752..2ab599a334 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -265,7 +265,7 @@ class BackgroundProcessLoggingContext(LoggingContext):
super().__init__("%s-%s" % (name, instance_id))
self._proc = _BackgroundProcess(name, self)
- def start(self, rusage: "Optional[resource._RUsage]"):
+ def start(self, rusage: "Optional[resource.struct_rusage]"):
"""Log context has started running (again)."""
super().start(rusage)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index e38e3c5d44..ce299ba3da 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -892,7 +892,7 @@ def safe_text(raw_text: str) -> jinja2.Markup:
A Markup object ready to safely use in a Jinja template.
"""
return jinja2.Markup(
- bleach.linkify(bleach.clean(raw_text, tags=[], attributes={}, strip=False))
+ bleach.linkify(bleach.clean(raw_text, tags=[], attributes=[], strip=False))
)
diff --git a/synapse/rest/media/v1/__init__.py b/synapse/rest/media/v1/__init__.py
index 3dd16d4bb5..d5b74cddf1 100644
--- a/synapse/rest/media/v1/__init__.py
+++ b/synapse/rest/media/v1/__init__.py
@@ -12,33 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import PIL.Image
+from PIL.features import check_codec
# check for JPEG support.
-try:
- PIL.Image._getdecoder("rgb", "jpeg", None)
-except OSError as e:
- if str(e).startswith("decoder jpeg not available"):
- raise Exception(
- "FATAL: jpeg codec not supported. Install pillow correctly! "
- " 'sudo apt-get install libjpeg-dev' then 'pip uninstall pillow &&"
- " pip install pillow --user'"
- )
-except Exception:
- # any other exception is fine
- pass
+if not check_codec("jpg"):
+ raise Exception(
+ "FATAL: jpeg codec not supported. Install pillow correctly! "
+ " 'sudo apt-get install libjpeg-dev' then 'pip uninstall pillow &&"
+ " pip install pillow --user'"
+ )
# check for PNG support.
-try:
- PIL.Image._getdecoder("rgb", "zip", None)
-except OSError as e:
- if str(e).startswith("decoder zip not available"):
- raise Exception(
- "FATAL: zip codec not supported. Install pillow correctly! "
- " 'sudo apt-get install libjpeg-dev' then 'pip uninstall pillow &&"
- " pip install pillow --user'"
- )
-except Exception:
- # any other exception is fine
- pass
+if not check_codec("zlib"):
+ raise Exception(
+ "FATAL: zip codec not supported. Install pillow correctly! "
+ " 'sudo apt-get install libjpeg-dev' then 'pip uninstall pillow &&"
+ " pip install pillow --user'"
+ )
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py
index df54a40649..46701a8b83 100644
--- a/synapse/rest/media/v1/thumbnailer.py
+++ b/synapse/rest/media/v1/thumbnailer.py
@@ -61,9 +61,19 @@ class Thumbnailer:
self.transpose_method = None
try:
# We don't use ImageOps.exif_transpose since it crashes with big EXIF
- image_exif = self.image._getexif()
+ #
+ # Ignore safety: Pillow seems to acknowledge that this method is
+ # "private, experimental, but generally widely used". Pillow 6
+ # includes a public getexif() method (no underscore) that we might
+ # consider using instead when we can bump that dependency.
+ #
+ # At the time of writing, Debian buster (currently oldstable)
+ # provides version 5.4.1. It's expected to EOL in mid-2022, see
+ # https://wiki.debian.org/DebianReleases#Production_Releases
+ image_exif = self.image._getexif() # type: ignore
if image_exif is not None:
image_orientation = image_exif.get(EXIF_ORIENTATION_TAG)
+ assert isinstance(image_orientation, int)
self.transpose_method = EXIF_TRANSPOSE_MAPPINGS.get(image_orientation)
except Exception as e:
# A lot of parsing errors can happen when parsing EXIF
@@ -76,7 +86,10 @@ class Thumbnailer:
A tuple containing the new image size in pixels as (width, height).
"""
if self.transpose_method is not None:
- self.image = self.image.transpose(self.transpose_method)
+ # Safety: `transpose` takes an int rather than e.g. an IntEnum.
+ # self.transpose_method is set above to be a value in
+ # EXIF_TRANSPOSE_MAPPINGS, and that only contains correct values.
+ self.image = self.image.transpose(self.transpose_method) # type: ignore[arg-type]
self.width, self.height = self.image.size
self.transpose_method = None
# We don't need EXIF any more
@@ -101,7 +114,7 @@ class Thumbnailer:
else:
return (max_height * self.width) // self.height, max_height
- def _resize(self, width: int, height: int) -> Image:
+ def _resize(self, width: int, height: int) -> Image.Image:
# 1-bit or 8-bit color palette images need converting to RGB
# otherwise they will be scaled using nearest neighbour which
# looks awful.
@@ -151,7 +164,7 @@ class Thumbnailer:
cropped = scaled_image.crop((crop_left, 0, crop_right, height))
return self._encode_image(cropped, output_type)
- def _encode_image(self, output_image: Image, output_type: str) -> BytesIO:
+ def _encode_image(self, output_image: Image.Image, output_type: str) -> BytesIO:
output_bytes_io = BytesIO()
fmt = self.FORMATS[output_type]
if fmt == "JPEG":
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index a63eaddfdc..11ca47ea28 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -487,6 +487,10 @@ def _upgrade_existing_database(
spec = importlib.util.spec_from_file_location(
module_name, absolute_path
)
+ if spec is None:
+ raise RuntimeError(
+ f"Could not build a module spec for {module_name} at {absolute_path}"
+ )
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index 64daff59df..abf53d149d 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -51,7 +51,10 @@ def _handle_frozendict(obj: Any) -> Dict[Any, Any]:
# fishing the protected dict out of the object is a bit nasty,
# but we don't really want the overhead of copying the dict.
try:
- return obj._dict
+ # Safety: we catch the AttributeError immediately below.
+ # See https://github.com/matrix-org/python-canonicaljson/issues/36#issuecomment-927816293
+ # for discussion on how frozendict's internals have changed over time.
+ return obj._dict # type: ignore[attr-defined]
except AttributeError:
# When the C implementation of frozendict is used,
# there isn't a `_dict` attribute with a dict
--
cgit 1.5.1
From 51a5da74ccd383806378b53ee8a09e27a8829f31 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Fri, 8 Oct 2021 15:25:16 +0100
Subject: Annotate synapse.storage.util (#10892)
Also mark `synapse.streams` as having no untyped defs.
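For illustration (a hypothetical signature, loosely modelled on the
`_load_current_id` helper in the diff below), this is the kind of change
`disallow_untyped_defs = True` forces:

    from synapse.storage.database import LoggingDatabaseConnection

    # Rejected by mypy under disallow_untyped_defs (no annotations):
    #     def _load_current_id(db_conn, table, column): ...

    # Accepted: every parameter and the return type are annotated.
    def _load_current_id(
        db_conn: LoggingDatabaseConnection, table: str, column: str
    ) -> int:
        ...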
Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com>
---
changelog.d/10892.misc | 1 +
mypy.ini | 6 +
.../slave/storage/_slaved_id_tracker.py | 4 +-
synapse/replication/slave/storage/pushers.py | 10 +-
synapse/storage/databases/main/pusher.py | 10 +-
synapse/storage/databases/main/registration.py | 9 +-
synapse/storage/util/id_generators.py | 143 +++++++++++++--------
synapse/storage/util/sequence.py | 6 +-
8 files changed, 124 insertions(+), 65 deletions(-)
create mode 100644 changelog.d/10892.misc
diff --git a/changelog.d/10892.misc b/changelog.d/10892.misc
new file mode 100644
index 0000000000..c8c471159b
--- /dev/null
+++ b/changelog.d/10892.misc
@@ -0,0 +1 @@
+Add further type hints to `synapse.storage.util`.
diff --git a/mypy.ini b/mypy.ini
index e7cb80b6eb..bc2b59ff56 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -105,6 +105,12 @@ disallow_untyped_defs = True
[mypy-synapse.state.*]
disallow_untyped_defs = True
+[mypy-synapse.storage.util.*]
+disallow_untyped_defs = True
+
+[mypy-synapse.streams.*]
+disallow_untyped_defs = True
+
[mypy-synapse.util.batching_queue]
disallow_untyped_defs = True
diff --git a/synapse/replication/slave/storage/_slaved_id_tracker.py b/synapse/replication/slave/storage/_slaved_id_tracker.py
index 2cb7489047..8c1bf9227a 100644
--- a/synapse/replication/slave/storage/_slaved_id_tracker.py
+++ b/synapse/replication/slave/storage/_slaved_id_tracker.py
@@ -13,14 +13,14 @@
# limitations under the License.
from typing import List, Optional, Tuple
-from synapse.storage.types import Connection
+from synapse.storage.database import LoggingDatabaseConnection
from synapse.storage.util.id_generators import _load_current_id
class SlavedIdTracker:
def __init__(
self,
- db_conn: Connection,
+ db_conn: LoggingDatabaseConnection,
table: str,
column: str,
extra_tables: Optional[List[Tuple[str, str]]] = None,
diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py
index 2672a2c94b..cea90c0f1b 100644
--- a/synapse/replication/slave/storage/pushers.py
+++ b/synapse/replication/slave/storage/pushers.py
@@ -15,9 +15,8 @@
from typing import TYPE_CHECKING
from synapse.replication.tcp.streams import PushersStream
-from synapse.storage.database import DatabasePool
+from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.databases.main.pusher import PusherWorkerStore
-from synapse.storage.types import Connection
from ._base import BaseSlavedStore
from ._slaved_id_tracker import SlavedIdTracker
@@ -27,7 +26,12 @@ if TYPE_CHECKING:
class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore):
- def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: LoggingDatabaseConnection,
+ hs: "HomeServer",
+ ):
super().__init__(database, db_conn, hs)
self._pushers_id_gen = SlavedIdTracker( # type: ignore
db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")]
diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py
index a93caae8d0..b73ce53c91 100644
--- a/synapse/storage/databases/main/pusher.py
+++ b/synapse/storage/databases/main/pusher.py
@@ -18,8 +18,7 @@ from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Optional,
from synapse.push import PusherConfig, ThrottleParams
from synapse.storage._base import SQLBaseStore, db_to_json
-from synapse.storage.database import DatabasePool
-from synapse.storage.types import Connection
+from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.util.id_generators import StreamIdGenerator
from synapse.types import JsonDict
from synapse.util import json_encoder
@@ -32,7 +31,12 @@ logger = logging.getLogger(__name__)
class PusherWorkerStore(SQLBaseStore):
- def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: LoggingDatabaseConnection,
+ hs: "HomeServer",
+ ):
super().__init__(database, db_conn, hs)
self._pushers_id_gen = StreamIdGenerator(
db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")]
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 7de4ad7f9b..181841ee06 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -26,7 +26,7 @@ from synapse.metrics.background_process_metrics import wrap_as_background_proces
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.databases.main.stats import StatsStore
-from synapse.storage.types import Connection, Cursor
+from synapse.storage.types import Cursor
from synapse.storage.util.id_generators import IdGenerator
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import UserID, UserInfo
@@ -1775,7 +1775,12 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
- def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: LoggingDatabaseConnection,
+ hs: "HomeServer",
+ ):
super().__init__(database, db_conn, hs)
self._ignore_unknown_session_error = (
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index 6f7cbe40f4..852bd79fee 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -16,42 +16,62 @@ import logging
import threading
from collections import OrderedDict
from contextlib import contextmanager
-from typing import Dict, Iterable, List, Optional, Set, Tuple, Union
+from types import TracebackType
+from typing import (
+ AsyncContextManager,
+ ContextManager,
+ Dict,
+ Generator,
+ Generic,
+ Iterable,
+ List,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ cast,
+)
import attr
from sortedcontainers import SortedSet
from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.storage.database import DatabasePool, LoggingTransaction
+from synapse.storage.database import (
+ DatabasePool,
+ LoggingDatabaseConnection,
+ LoggingTransaction,
+)
from synapse.storage.types import Cursor
from synapse.storage.util.sequence import PostgresSequenceGenerator
logger = logging.getLogger(__name__)
+T = TypeVar("T")
+
+
class IdGenerator:
- def __init__(self, db_conn, table, column):
+ def __init__(
+ self,
+ db_conn: LoggingDatabaseConnection,
+ table: str,
+ column: str,
+ ):
self._lock = threading.Lock()
self._next_id = _load_current_id(db_conn, table, column)
- def get_next(self):
+ def get_next(self) -> int:
with self._lock:
self._next_id += 1
return self._next_id
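The annotated `IdGenerator` is a lock-protected counter seeded from the database. A self-contained sketch of the same pattern, with the database load replaced by a plain integer:

```python
import threading


class CounterIdGenerator:
    """Hands out strictly increasing IDs; safe to call from many threads."""

    def __init__(self, current_id: int) -> None:
        self._lock = threading.Lock()
        self._next_id = current_id

    def get_next(self) -> int:
        # The lock guarantees no two callers ever receive the same ID.
        with self._lock:
            self._next_id += 1
            return self._next_id


gen = CounterIdGenerator(current_id=0)
assert gen.get_next() == 1
assert gen.get_next() == 2
```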
-def _load_current_id(db_conn, table, column, step=1):
- """
-
- Args:
- db_conn (object):
- table (str):
- column (str):
- step (int):
-
- Returns:
- int
- """
+def _load_current_id(
+ db_conn: LoggingDatabaseConnection, table: str, column: str, step: int = 1
+) -> int:
# debug logging for https://github.com/matrix-org/synapse/issues/7968
logger.info("initialising stream generator for %s(%s)", table, column)
cur = db_conn.cursor(txn_name="_load_current_id")
@@ -59,7 +79,9 @@ def _load_current_id(db_conn, table, column, step=1):
cur.execute("SELECT MAX(%s) FROM %s" % (column, table))
else:
cur.execute("SELECT MIN(%s) FROM %s" % (column, table))
- (val,) = cur.fetchone()
+ result = cur.fetchone()
+ assert result is not None
+ (val,) = result
cur.close()
current_id = int(val) if val else step
return (max if step > 0 else min)(current_id, step)
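The `assert result is not None` lines exist because `Cursor.fetchone()` is typed as returning an optional row. An aggregate query always produces exactly one row, so the assert narrows the type for mypy without changing behaviour. A sketch of the pattern using sqlite3 (chosen here purely for illustration):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE pushers (id INTEGER)")
cur.execute("SELECT MAX(id) FROM pushers")

# fetchone() is Optional[...] to the type checker, but MAX() always
# yields one row (possibly containing NULL), so the assert is safe.
result = cur.fetchone()
assert result is not None
(val,) = result
current_id = int(val) if val else 1
assert current_id == 1  # empty table falls back to the step value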
@@ -93,16 +115,16 @@ class StreamIdGenerator:
def __init__(
self,
- db_conn,
- table,
- column,
+ db_conn: LoggingDatabaseConnection,
+ table: str,
+ column: str,
extra_tables: Iterable[Tuple[str, str]] = (),
- step=1,
- ):
+ step: int = 1,
+ ) -> None:
assert step != 0
self._lock = threading.Lock()
- self._step = step
- self._current = _load_current_id(db_conn, table, column, step)
+ self._step: int = step
+ self._current: int = _load_current_id(db_conn, table, column, step)
for table, column in extra_tables:
self._current = (max if step > 0 else min)(
self._current, _load_current_id(db_conn, table, column, step)
@@ -115,7 +137,7 @@ class StreamIdGenerator:
# The key and values are the same, but we never look at the values.
self._unfinished_ids: OrderedDict[int, int] = OrderedDict()
- def get_next(self):
+ def get_next(self) -> AsyncContextManager[int]:
"""
Usage:
async with stream_id_gen.get_next() as stream_id:
@@ -128,7 +150,7 @@ class StreamIdGenerator:
self._unfinished_ids[next_id] = next_id
@contextmanager
- def manager():
+ def manager() -> Generator[int, None, None]:
try:
yield next_id
finally:
@@ -137,7 +159,7 @@ class StreamIdGenerator:
return _AsyncCtxManagerWrapper(manager())
- def get_next_mult(self, n):
+ def get_next_mult(self, n: int) -> AsyncContextManager[Sequence[int]]:
"""
Usage:
async with stream_id_gen.get_next(n) as stream_ids:
@@ -155,7 +177,7 @@ class StreamIdGenerator:
self._unfinished_ids[next_id] = next_id
@contextmanager
- def manager():
+ def manager() -> Generator[Sequence[int], None, None]:
try:
yield next_ids
finally:
@@ -215,7 +237,7 @@ class MultiWriterIdGenerator:
def __init__(
self,
- db_conn,
+ db_conn: LoggingDatabaseConnection,
db: DatabasePool,
stream_name: str,
instance_name: str,
@@ -223,7 +245,7 @@ class MultiWriterIdGenerator:
sequence_name: str,
writers: List[str],
positive: bool = True,
- ):
+ ) -> None:
self._db = db
self._stream_name = stream_name
self._instance_name = instance_name
@@ -285,9 +307,9 @@ class MultiWriterIdGenerator:
def _load_current_ids(
self,
- db_conn,
+ db_conn: LoggingDatabaseConnection,
tables: List[Tuple[str, str, str]],
- ):
+ ) -> None:
cur = db_conn.cursor(txn_name="_load_current_ids")
# Load the current positions of all writers for the stream.
@@ -335,7 +357,9 @@ class MultiWriterIdGenerator:
"agg": "MAX" if self._positive else "-MIN",
}
cur.execute(sql)
- (stream_id,) = cur.fetchone()
+ result = cur.fetchone()
+ assert result is not None
+ (stream_id,) = result
max_stream_id = max(max_stream_id, stream_id)
@@ -354,7 +378,7 @@ class MultiWriterIdGenerator:
self._persisted_upto_position = min_stream_id
- rows = []
+ rows: List[Tuple[str, int]] = []
for table, instance_column, id_column in tables:
sql = """
SELECT %(instance)s, %(id)s FROM %(table)s
@@ -367,7 +391,8 @@ class MultiWriterIdGenerator:
}
cur.execute(sql, (min_stream_id * self._return_factor,))
- rows.extend(cur)
+ # Cast safety: this corresponds to the types returned by the query above.
+ rows.extend(cast(Iterable[Tuple[str, int]], cur))
# Sort so that we handle rows in order for each instance.
rows.sort()
@@ -385,13 +410,13 @@ class MultiWriterIdGenerator:
cur.close()
- def _load_next_id_txn(self, txn) -> int:
+ def _load_next_id_txn(self, txn: Cursor) -> int:
return self._sequence_gen.get_next_id_txn(txn)
- def _load_next_mult_id_txn(self, txn, n: int) -> List[int]:
+ def _load_next_mult_id_txn(self, txn: Cursor, n: int) -> List[int]:
return self._sequence_gen.get_next_mult_txn(txn, n)
- def get_next(self):
+ def get_next(self) -> AsyncContextManager[int]:
"""
Usage:
async with stream_id_gen.get_next() as stream_id:
@@ -403,9 +428,12 @@ class MultiWriterIdGenerator:
if self._writers and self._instance_name not in self._writers:
raise Exception("Tried to allocate stream ID on non-writer")
- return _MultiWriterCtxManager(self)
+ # Cast safety: the second argument to _MultiWriterCtxManager, multiple_ids,
+ # controls the return type. If `None` or omitted, the context manager yields
+ # a single integer stream_id; otherwise it yields a list of stream_ids.
+ return cast(AsyncContextManager[int], _MultiWriterCtxManager(self))
- def get_next_mult(self, n: int):
+ def get_next_mult(self, n: int) -> AsyncContextManager[List[int]]:
"""
Usage:
async with stream_id_gen.get_next_mult(5) as stream_ids:
@@ -417,9 +445,10 @@ class MultiWriterIdGenerator:
if self._writers and self._instance_name not in self._writers:
raise Exception("Tried to allocate stream ID on non-writer")
- return _MultiWriterCtxManager(self, n)
+ # Cast safety: see get_next.
+ return cast(AsyncContextManager[List[int]], _MultiWriterCtxManager(self, n))
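The two `cast`s paper over the fact that `_MultiWriterCtxManager` yields either a single ID or a batch depending on its second argument. An alternative way to express that relationship — not what this patch does — is `typing.overload` on a hypothetical factory:

```python
from typing import List, Optional, Union, overload


@overload
def allocate(n: None = None) -> int: ...
@overload
def allocate(n: int) -> List[int]: ...
def allocate(n: Optional[int] = None) -> Union[int, List[int]]:
    # With n omitted callers get one ID; with n given, a batch of n.
    if n is None:
        return 1
    return list(range(1, n + 1))


assert allocate() == 1
assert allocate(3) == [1, 2, 3]
```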
- def get_next_txn(self, txn: LoggingTransaction):
+ def get_next_txn(self, txn: LoggingTransaction) -> int:
"""
Usage:
@@ -457,7 +486,7 @@ class MultiWriterIdGenerator:
return self._return_factor * next_id
- def _mark_id_as_finished(self, next_id: int):
+ def _mark_id_as_finished(self, next_id: int) -> None:
"""The ID has finished being processed so we should advance the
current position if possible.
"""
@@ -534,7 +563,7 @@ class MultiWriterIdGenerator:
for name, i in self._current_positions.items()
}
- def advance(self, instance_name: str, new_id: int):
+ def advance(self, instance_name: str, new_id: int) -> None:
"""Advance the position of the named writer to the given ID, if greater
than existing entry.
"""
@@ -560,7 +589,7 @@ class MultiWriterIdGenerator:
with self._lock:
return self._return_factor * self._persisted_upto_position
- def _add_persisted_position(self, new_id: int):
+ def _add_persisted_position(self, new_id: int) -> None:
"""Record that we have persisted a position.
This is used to keep the `_current_positions` up to date.
@@ -606,7 +635,7 @@ class MultiWriterIdGenerator:
# do.
break
- def _update_stream_positions_table_txn(self, txn: Cursor):
+ def _update_stream_positions_table_txn(self, txn: Cursor) -> None:
"""Update the `stream_positions` table with newly persisted position."""
if not self._writers:
@@ -628,20 +657,25 @@ class MultiWriterIdGenerator:
txn.execute(sql, (self._stream_name, self._instance_name, pos))
-@attr.s(slots=True)
-class _AsyncCtxManagerWrapper:
+@attr.s(frozen=True, auto_attribs=True)
+class _AsyncCtxManagerWrapper(Generic[T]):
"""Helper class to convert a plain context manager to an async one.
This is mainly useful if you have a plain context manager but the interface
requires an async one.
"""
- inner = attr.ib()
+ inner: ContextManager[T]
- async def __aenter__(self):
+ async def __aenter__(self) -> T:
return self.inner.__enter__()
- async def __aexit__(self, exc_type, exc, tb):
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc: Optional[BaseException],
+ tb: Optional[TracebackType],
+ ) -> Optional[bool]:
return self.inner.__exit__(exc_type, exc, tb)
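A self-contained sketch of the wrapper pattern, assuming only the standard library: a plain `@contextmanager` generator is adapted to the `async with` protocol by delegating `__aenter__`/`__aexit__` to the synchronous `__enter__`/`__exit__`.

```python
import asyncio
from contextlib import contextmanager
from types import TracebackType
from typing import ContextManager, Generator, Generic, Optional, Type, TypeVar

T = TypeVar("T")


class AsyncCtxManagerWrapper(Generic[T]):
    """Adapts a plain context manager to the async context manager protocol."""

    def __init__(self, inner: ContextManager[T]) -> None:
        self._inner = inner

    async def __aenter__(self) -> T:
        return self._inner.__enter__()

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> Optional[bool]:
        return self._inner.__exit__(exc_type, exc, tb)


@contextmanager
def give_id() -> Generator[int, None, None]:
    yield 42


async def main() -> None:
    async with AsyncCtxManagerWrapper(give_id()) as stream_id:
        assert stream_id == 42


asyncio.run(main())
```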
@@ -671,7 +705,12 @@ class _MultiWriterCtxManager:
else:
return [i * self.id_gen._return_factor for i in self.stream_ids]
- async def __aexit__(self, exc_type, exc, tb):
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc: Optional[BaseException],
+ tb: Optional[TracebackType],
+ ) -> bool:
for i in self.stream_ids:
self.id_gen._mark_id_as_finished(i)
diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py
index bb33e04fb1..75268cbe15 100644
--- a/synapse/storage/util/sequence.py
+++ b/synapse/storage/util/sequence.py
@@ -81,7 +81,7 @@ class SequenceGenerator(metaclass=abc.ABCMeta):
id_column: str,
stream_name: Optional[str] = None,
positive: bool = True,
- ):
+ ) -> None:
"""Should be called during start up to test that the current value of
the sequence is greater than or equal to the maximum ID in the table.
@@ -122,7 +122,7 @@ class PostgresSequenceGenerator(SequenceGenerator):
id_column: str,
stream_name: Optional[str] = None,
positive: bool = True,
- ):
+ ) -> None:
"""See SequenceGenerator.check_consistency for docstring."""
txn = db_conn.cursor(txn_name="sequence.check_consistency")
@@ -244,7 +244,7 @@ class LocalSequenceGenerator(SequenceGenerator):
id_column: str,
stream_name: Optional[str] = None,
positive: bool = True,
- ):
+ ) -> None:
# There is nothing to do for in memory sequences
pass
--
cgit 1.5.1
From c576598a6834c59e7e6e51eb72c2967b00762666 Mon Sep 17 00:00:00 2001
From: "Olivier Wilkinson (reivilibre)"
Date: Fri, 8 Oct 2021 17:11:14 +0100
Subject: Include the requirements for [mypy,lint] in [dev]
---
setup.py | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/setup.py b/setup.py
index f8b4487bc1..220084a49d 100755
--- a/setup.py
+++ b/setup.py
@@ -103,15 +103,6 @@ CONDITIONAL_REQUIREMENTS["lint"] = [
"flake8",
]
-CONDITIONAL_REQUIREMENTS["dev"] = CONDITIONAL_REQUIREMENTS["lint"] + [
- # The following are used by the release script
- "click==7.1.2",
- "redbaron==0.9.2",
- "GitPython==3.1.14",
- "commonmark==0.9.1",
- "pygithub==1.55",
-]
-
CONDITIONAL_REQUIREMENTS["mypy"] = [
"mypy==0.910",
"mypy-zope==0.3.2",
@@ -130,6 +121,20 @@ CONDITIONAL_REQUIREMENTS["mypy"] = [
# parameterized_class decorator was introduced in parameterized 0.7.0
CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0"]
+CONDITIONAL_REQUIREMENTS["dev"] = (
+ CONDITIONAL_REQUIREMENTS["lint"]
+ + CONDITIONAL_REQUIREMENTS["mypy"]
+ + CONDITIONAL_REQUIREMENTS["test"]
+ + [
+ # The following are used by the release script
+ "click==7.1.2",
+ "redbaron==0.9.2",
+ "GitPython==3.1.14",
+ "commonmark==0.9.1",
+ "pygithub==1.55",
+ ]
+)
+
setup(
name="matrix-synapse",
version=version,
--
cgit 1.5.1
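The change above composes the `dev` extra from the other tooling extras so that a single `pip install -e ".[all,dev]"` covers linting, type-checking and testing. A sketch of the idea in isolation (pins abbreviated from the diff):

```python
extras = {
    "lint": ["flake8"],
    "mypy": ["mypy==0.910"],
    "test": ["parameterized>=0.7.0"],
}

# "dev" is the union of the tooling extras plus the release-script
# dependencies, so one extra pulls in everything a contributor needs.
extras["dev"] = (
    extras["lint"]
    + extras["mypy"]
    + extras["test"]
    + ["click==7.1.2"]
)

assert "flake8" in extras["dev"] and "mypy==0.910" in extras["dev"]
```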
From 9f23ff78da69c84b9ab6f1dacd4a3fd31d17a812 Mon Sep 17 00:00:00 2001
From: "Olivier Wilkinson (reivilibre)"
Date: Fri, 8 Oct 2021 17:11:32 +0100
Subject: Update contributing guide to use [all,dev]
---
docs/development/contributing_guide.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 580a4f7f98..3bf08a72bb 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -50,7 +50,7 @@ setup a *virtualenv*, as follows:
cd path/where/you/have/cloned/the/repository
python3 -m venv ./env
source ./env/bin/activate
-pip install -e ".[all,lint,mypy,test]"
+pip install -e ".[all,dev]"
pip install tox
```
--
cgit 1.5.1
From d51a3400196763c2de38918719d50ab75f3d1bc5 Mon Sep 17 00:00:00 2001
From: "Olivier Wilkinson (reivilibre)"
Date: Fri, 8 Oct 2021 17:12:40 +0100
Subject: Newsfile
Signed-off-by: Olivier Wilkinson (reivilibre)
---
changelog.d/11034.misc | 1 +
1 file changed, 1 insertion(+)
create mode 100644 changelog.d/11034.misc
diff --git a/changelog.d/11034.misc b/changelog.d/11034.misc
new file mode 100644
index 0000000000..b15fd66ac3
--- /dev/null
+++ b/changelog.d/11034.misc
@@ -0,0 +1 @@
+When installing the optional developer dependencies, also include the dependencies needed for type-checking and unit testing.
--
cgit 1.5.1
From 593eeac19ea8ecc1344933f91fb4fc18a8a97221 Mon Sep 17 00:00:00 2001
From: "Olivier Wilkinson (reivilibre)"
Date: Fri, 8 Oct 2021 17:15:32 +0100
Subject: Revert accidental push to develop.
---
changelog.d/11034.misc | 1 -
docs/development/contributing_guide.md | 2 +-
setup.py | 23 +++++++++--------------
3 files changed, 10 insertions(+), 16 deletions(-)
delete mode 100644 changelog.d/11034.misc
diff --git a/changelog.d/11034.misc b/changelog.d/11034.misc
deleted file mode 100644
index b15fd66ac3..0000000000
--- a/changelog.d/11034.misc
+++ /dev/null
@@ -1 +0,0 @@
-When installing the optional developer dependencies, also include the dependencies needed for type-checking and unit testing.
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 3bf08a72bb..580a4f7f98 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -50,7 +50,7 @@ setup a *virtualenv*, as follows:
cd path/where/you/have/cloned/the/repository
python3 -m venv ./env
source ./env/bin/activate
-pip install -e ".[all,dev]"
+pip install -e ".[all,lint,mypy,test]"
pip install tox
```
diff --git a/setup.py b/setup.py
index 220084a49d..f8b4487bc1 100755
--- a/setup.py
+++ b/setup.py
@@ -103,6 +103,15 @@ CONDITIONAL_REQUIREMENTS["lint"] = [
"flake8",
]
+CONDITIONAL_REQUIREMENTS["dev"] = CONDITIONAL_REQUIREMENTS["lint"] + [
+ # The following are used by the release script
+ "click==7.1.2",
+ "redbaron==0.9.2",
+ "GitPython==3.1.14",
+ "commonmark==0.9.1",
+ "pygithub==1.55",
+]
+
CONDITIONAL_REQUIREMENTS["mypy"] = [
"mypy==0.910",
"mypy-zope==0.3.2",
@@ -121,20 +130,6 @@ CONDITIONAL_REQUIREMENTS["mypy"] = [
# parameterized_class decorator was introduced in parameterized 0.7.0
CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0"]
-CONDITIONAL_REQUIREMENTS["dev"] = (
- CONDITIONAL_REQUIREMENTS["lint"]
- + CONDITIONAL_REQUIREMENTS["mypy"]
- + CONDITIONAL_REQUIREMENTS["test"]
- + [
- # The following are used by the release script
- "click==7.1.2",
- "redbaron==0.9.2",
- "GitPython==3.1.14",
- "commonmark==0.9.1",
- "pygithub==1.55",
- ]
-)
-
setup(
name="matrix-synapse",
version=version,
--
cgit 1.5.1
From 1b112840d2c6dafa131eba4f0285409bb7345661 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 8 Oct 2021 14:14:42 -0400
Subject: Autodiscover oEmbed endpoint from returned HTML (#10822)
Searches the returned HTML for an oEmbed endpoint using the
autodiscovery mechanism (`<link rel="alternate" type="application/json+oembed">`), and will request it
to generate the preview.
---
changelog.d/10822.feature | 1 +
synapse/rest/media/v1/oembed.py | 26 ++++++
synapse/rest/media/v1/preview_url_resource.py | 112 +++++++++++++++++---------
tests/rest/media/v1/test_url_preview.py | 100 ++++++++++++++++++++++-
tests/test_preview.py | 40 +++++----
5 files changed, 224 insertions(+), 55 deletions(-)
create mode 100644 changelog.d/10822.feature
diff --git a/changelog.d/10822.feature b/changelog.d/10822.feature
new file mode 100644
index 0000000000..72566e31ec
--- /dev/null
+++ b/changelog.d/10822.feature
@@ -0,0 +1 @@
+Support autodiscovery of oEmbed previews.
diff --git a/synapse/rest/media/v1/oembed.py b/synapse/rest/media/v1/oembed.py
index e04671fb95..6d7e1f9064 100644
--- a/synapse/rest/media/v1/oembed.py
+++ b/synapse/rest/media/v1/oembed.py
@@ -96,6 +96,32 @@ class OEmbedProvider:
# No match.
return None
+ def autodiscover_from_html(self, tree: "etree.Element") -> Optional[str]:
+ """
+ Search an HTML document for oEmbed autodiscovery information.
+
+ Args:
+ tree: The parsed HTML body.
+
+ Returns:
+ The URL to use for oEmbed information, or None if no URL was found.
+ """
+ # Search for link elements with the proper rel and type attributes.
+ for tag in tree.xpath(
+ "//link[@rel='alternate'][@type='application/json+oembed']"
+ ):
+ if "href" in tag.attrib:
+ return tag.attrib["href"]
+
+ # Some providers (e.g. Flickr) use alternative instead of alternate.
+ for tag in tree.xpath(
+ "//link[@rel='alternative'][@type='application/json+oembed']"
+ ):
+ if "href" in tag.attrib:
+ return tag.attrib["href"]
+
+ return None
+
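A standalone sketch of the same discovery logic, assuming `lxml` is installed:

```python
from typing import Optional

from lxml import etree


def find_oembed_url(html: bytes) -> Optional[str]:
    """Return the first JSON oEmbed endpoint advertised by the document."""
    tree = etree.fromstring(html, etree.HTMLParser(recover=True))
    if tree is None:
        return None
    # Providers advertise the endpoint via a <link> element; some
    # (e.g. Flickr) use rel="alternative" rather than rel="alternate".
    for rel in ("alternate", "alternative"):
        for tag in tree.xpath(
            f"//link[@rel='{rel}'][@type='application/json+oembed']"
        ):
            if "href" in tag.attrib:
                return tag.attrib["href"]
    return None


html = (
    b'<html><head><link rel="alternate" type="application/json+oembed"'
    b' href="https://example.com/oembed?url=x"/></head></html>'
)
assert find_oembed_url(html) == "https://example.com/oembed?url=x"
```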
def parse_oembed_response(self, url: str, raw_body: bytes) -> OEmbedResult:
"""
Parse the oEmbed response into an Open Graph response.
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 044f44a397..1fe0fc8aa9 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -22,7 +22,7 @@ import re
import shutil
import sys
import traceback
-from typing import TYPE_CHECKING, Dict, Generator, Iterable, Optional, Union
+from typing import TYPE_CHECKING, Dict, Generator, Iterable, Optional, Tuple, Union
from urllib import parse as urlparse
import attr
@@ -296,22 +296,32 @@ class PreviewUrlResource(DirectServeJsonResource):
body = file.read()
encoding = get_html_media_encoding(body, media_info.media_type)
- og = decode_and_calc_og(body, media_info.uri, encoding)
-
- await self._precache_image_url(user, media_info, og)
-
- elif oembed_url and _is_json(media_info.media_type):
- # Handle an oEmbed response.
- with open(media_info.filename, "rb") as file:
- body = file.read()
-
- oembed_response = self._oembed.parse_oembed_response(url, body)
- og = oembed_response.open_graph_result
-
- # Use the cache age from the oEmbed result, instead of the HTTP response.
- if oembed_response.cache_age is not None:
- expiration_ms = oembed_response.cache_age
+ tree = decode_body(body, encoding)
+ if tree is not None:
+ # Check if this HTML document points to oEmbed information and
+ # defer to that.
+ oembed_url = self._oembed.autodiscover_from_html(tree)
+ og = {}
+ if oembed_url:
+ oembed_info = await self._download_url(oembed_url, user)
+ og, expiration_ms = await self._handle_oembed_response(
+ url, oembed_info, expiration_ms
+ )
+
+ # If there was no oEmbed URL (or oEmbed parsing failed), attempt
+ # to generate the Open Graph information from the HTML.
+ if not oembed_url or not og:
+ og = _calc_og(tree, media_info.uri)
+
+ await self._precache_image_url(user, media_info, og)
+ else:
+ og = {}
+ elif oembed_url:
+ # Handle the oEmbed information.
+ og, expiration_ms = await self._handle_oembed_response(
+ url, media_info, expiration_ms
+ )
await self._precache_image_url(user, media_info, og)
else:
@@ -479,6 +489,39 @@ class PreviewUrlResource(DirectServeJsonResource):
else:
del og["og:image"]
+ async def _handle_oembed_response(
+ self, url: str, media_info: MediaInfo, expiration_ms: int
+ ) -> Tuple[JsonDict, int]:
+ """
+ Parse the downloaded oEmbed info.
+
+ Args:
+ url: The URL which is being previewed (not the one which was
+ requested).
+ media_info: The media being previewed.
+ expiration_ms: The length of time, in milliseconds, the media is valid for.
+
+ Returns:
+ A tuple of:
+ The Open Graph dictionary, if the oEmbed info can be parsed.
+ The (possibly updated) length of time, in milliseconds, the media is valid for.
+ """
+ # If JSON was not returned, there's nothing to do.
+ if not _is_json(media_info.media_type):
+ return {}, expiration_ms
+
+ with open(media_info.filename, "rb") as file:
+ body = file.read()
+
+ oembed_response = self._oembed.parse_oembed_response(url, body)
+ open_graph_result = oembed_response.open_graph_result
+
+ # Use the cache age from the oEmbed result, if one was given.
+ if open_graph_result and oembed_response.cache_age is not None:
+ expiration_ms = oembed_response.cache_age
+
+ return open_graph_result, expiration_ms
+
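For orientation, a simplified sketch of what the oEmbed-to-Open-Graph conversion amounts to for a "photo" response (the real `parse_oembed_response` also handles "rich"/"video" types, author fields and `cache_age`):

```python
import json
from typing import Any, Dict


def oembed_to_open_graph(raw_body: bytes) -> Dict[str, Any]:
    # Simplified mapping: title becomes og:title, and a photo's URL
    # becomes og:image, which is then downloaded and thumbnailed.
    oembed = json.loads(raw_body)
    og: Dict[str, Any] = {}
    if oembed.get("title"):
        og["og:title"] = oembed["title"]
    if oembed.get("type") == "photo" and oembed.get("url"):
        og["og:image"] = oembed["url"]
    return og


body = b'{"version": "1.0", "type": "photo", "url": "http://cdn.twitter.com/matrixdotorg"}'
assert oembed_to_open_graph(body)["og:image"] == "http://cdn.twitter.com/matrixdotorg"
```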
def _start_expire_url_cache_data(self) -> Deferred:
return run_as_background_process(
"expire_url_cache_data", self._expire_url_cache_data
@@ -631,26 +674,22 @@ def get_html_media_encoding(body: bytes, content_type: str) -> str:
return "utf-8"
-def decode_and_calc_og(
- body: bytes, media_uri: str, request_encoding: Optional[str] = None
-) -> JsonDict:
+def decode_body(
+ body: bytes, request_encoding: Optional[str] = None
+) -> Optional["etree.Element"]:
"""
- Calculate metadata for an HTML document.
-
- This uses lxml to parse the HTML document into the OG response. If errors
- occur during processing of the document, an empty response is returned.
+ This uses lxml to parse the HTML document.
Args:
body: The HTML document, as bytes.
- media_url: The URI used to download the body.
request_encoding: The character encoding of the body, as a string.
Returns:
- The OG response as a dictionary.
+ The parsed HTML body, or None if an error occurred during processing.
"""
# If there's no body, nothing useful is going to be found.
if not body:
- return {}
+ return None
from lxml import etree
@@ -662,25 +701,22 @@ def decode_and_calc_og(
parser = etree.HTMLParser(recover=True, encoding="utf-8")
except Exception as e:
logger.warning("Unable to create HTML parser: %s" % (e,))
- return {}
-
- def _attempt_calc_og(body_attempt: Union[bytes, str]) -> Dict[str, Optional[str]]:
- # Attempt to parse the body. If this fails, log and return no metadata.
- tree = etree.fromstring(body_attempt, parser)
-
- # The data was successfully parsed, but no tree was found.
- if tree is None:
- return {}
+ return None
- return _calc_og(tree, media_uri)
+ def _attempt_decode_body(
+ body_attempt: Union[bytes, str]
+ ) -> Optional["etree.Element"]:
+ # Attempt to parse the body. Returns None if the body was successfully
+ # parsed, but no tree was found.
+ return etree.fromstring(body_attempt, parser)
# Attempt to parse the body. If this fails, log and return no metadata.
try:
- return _attempt_calc_og(body)
+ return _attempt_decode_body(body)
except UnicodeDecodeError:
# blindly try decoding the body as utf-8, which seems to fix
# the charset mismatches on https://google.com
- return _attempt_calc_og(body.decode("utf-8", "ignore"))
+ return _attempt_decode_body(body.decode("utf-8", "ignore"))
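A sketch of the decode-with-fallback behaviour in isolation, assuming `lxml` (mirroring the re-decode trick above):

```python
from typing import Optional

from lxml import etree


def decode_html(body: bytes) -> Optional["etree._Element"]:
    if not body:
        return None
    parser = etree.HTMLParser(recover=True, encoding="utf-8")
    try:
        return etree.fromstring(body, parser)
    except UnicodeDecodeError:
        # Some pages declare one charset but serve another; blindly
        # re-decoding as UTF-8 and dropping bad bytes recovers most of them.
        return etree.fromstring(body.decode("utf-8", "ignore"), parser)


tree = decode_html(b"<html><body><h1>hi</h1></body></html>")
assert tree is not None
```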
def _calc_og(tree: "etree.Element", media_uri: str) -> Dict[str, Optional[str]]:
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index ce43de780b..8698135a76 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -725,9 +725,107 @@ class URLPreviewTests(unittest.HomeserverTestCase):
},
)
+ def test_oembed_autodiscovery(self):
+ """
+ Autodiscovery works by finding the link in the HTML response and then requesting an oEmbed URL.
+ 1. Request a preview of a URL which is not known to the oEmbed code.
+ 2. It returns HTML including a link to an oEmbed preview.
+ 3. The oEmbed preview is requested and returns a URL for an image.
+ 4. The image is requested for thumbnailing.
+ """
+ # This is a little cheesy in that we use the www subdomain (which isn't in
+ # the list of oEmbed patterns) to get a "raw" HTML response.
+ self.lookups["www.twitter.com"] = [(IPv4Address, "10.1.2.3")]
+ self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
+ self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")]
+
+ result = b"""
+ <link rel="alternate" type="application/json+oembed"
+ href="http://publish.twitter.com/oembed?url=http%3A%2F%2Fcdn.twitter.com%2Fmatrixdotorg%2Fstatus%2F12345&format=json"
+ title="matrixdotorg" />
+ """
+
+ channel = self.make_request(
+ "GET",
+ "preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+ )
+ % (len(result),)
+ + result
+ )
+
+ self.pump()
+
+ # The oEmbed response.
+ result2 = {
+ "version": "1.0",
+ "type": "photo",
+ "url": "http://cdn.twitter.com/matrixdotorg",
+ }
+ oembed_content = json.dumps(result2).encode("utf-8")
+
+ # Ensure a second request is made to the oEmbed URL.
+ client = self.reactor.tcpClients[1][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: application/json; charset="utf8"\r\n\r\n'
+ )
+ % (len(oembed_content),)
+ + oembed_content
+ )
+
+ self.pump()
+
+ # Ensure the URL is what was requested.
+ self.assertIn(b"/oembed?", server.data)
+
+ # Ensure a third request is made to the photo URL.
+ client = self.reactor.tcpClients[2][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b"Content-Type: image/png\r\n\r\n"
+ )
+ % (len(SMALL_PNG),)
+ + SMALL_PNG
+ )
+
+ self.pump()
+
+ # Ensure the URL is what was requested.
+ self.assertIn(b"/matrixdotorg", server.data)
+
+ self.assertEqual(channel.code, 200)
+ body = channel.json_body
+ self.assertEqual(
+ body["og:url"], "http://www.twitter.com/matrixdotorg/status/12345"
+ )
+ self.assertTrue(body["og:image"].startswith("mxc://"))
+ self.assertEqual(body["og:image:height"], 1)
+ self.assertEqual(body["og:image:width"], 1)
+ self.assertEqual(body["og:image:type"], "image/png")
+
def _download_image(self):
"""Downloads an image into the URL cache.
-
Returns:
A (host, media_id) tuple representing the MXC URI of the image.
"""
diff --git a/tests/test_preview.py b/tests/test_preview.py
index 48e792b55b..09e017b4d9 100644
--- a/tests/test_preview.py
+++ b/tests/test_preview.py
@@ -13,7 +13,8 @@
# limitations under the License.
from synapse.rest.media.v1.preview_url_resource import (
- decode_and_calc_og,
+ _calc_og,
+ decode_body,
get_html_media_encoding,
summarize_paragraphs,
)
@@ -158,7 +159,8 @@ class CalcOgTestCase(unittest.TestCase):
"""
- og = decode_and_calc_og(html, "http://example.com/test.html")
+ tree = decode_body(html)
+ og = _calc_og(tree, "http://example.com/test.html")
self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
@@ -173,7 +175,8 @@ class CalcOgTestCase(unittest.TestCase):