Diffstat (limited to 'synapse/rest/media')
-rw-r--r--  synapse/rest/media/v1/download_resource.py  |   7
-rw-r--r--  synapse/rest/media/v1/media_storage.py      |   7
-rw-r--r--  synapse/rest/media/v1/preview_html.py       | 112
-rw-r--r--  synapse/rest/media/v1/thumbnail_resource.py |   7

4 files changed, 111 insertions(+), 22 deletions(-)
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py
index 6180fa575e..048a042692 100644
--- a/synapse/rest/media/v1/download_resource.py
+++ b/synapse/rest/media/v1/download_resource.py
@@ -15,7 +15,11 @@
 import logging
 from typing import TYPE_CHECKING
 
-from synapse.http.server import DirectServeJsonResource, set_cors_headers
+from synapse.http.server import (
+    DirectServeJsonResource,
+    set_corp_headers,
+    set_cors_headers,
+)
 from synapse.http.servlet import parse_boolean
 from synapse.http.site import SynapseRequest
 
@@ -38,6 +42,7 @@ class DownloadResource(DirectServeJsonResource):
 
     async def _async_render_GET(self, request: SynapseRequest) -> None:
         set_cors_headers(request)
+        set_corp_headers(request)
         request.setHeader(
             b"Content-Security-Policy",
             b"sandbox;"
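The set_corp_headers helper used above is imported from synapse.http.server, but its body is not part of this diff. As a rough sketch of what it is assumed to do, using Twisted's Request API (the exact header value below is an assumption, not confirmed by the diff):

    from twisted.web.server import Request

    def set_corp_headers(request: Request) -> None:
        # Assumed behaviour: mark the media response as safe to embed from
        # other origins, complementing the CORS headers that the existing
        # set_cors_headers() call already sets.
        request.setHeader(b"Cross-Origin-Resource-Policy", b"cross-origin")

Sending a Cross-Origin-Resource-Policy header keeps media loadable from pages that opt into cross-origin isolation via Cross-Origin-Embedder-Policy.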
+ """ + results: Dict[str, Optional[str]] = {} + for tag in tree.xpath( + f"//*/meta[starts-with(@{property}, '{prefix}:')][@content][not(@content='')]" + ): + # if we've got more than 50 tags, someone is taking the piss + if len(results) >= 50: + logger.warning( + "Skipping parsing of Open Graph for page with too many '%s:' tags", + prefix, + ) + return {} + + key = tag.attrib[property] + if property_mapper: + key = property_mapper(key) + # None is a special value used to ignore a value. + if key is None: + continue + + results[key] = tag.attrib["content"] + + return results + + +def _map_twitter_to_open_graph(key: str) -> Optional[str]: + """ + Map a Twitter card property to the analogous Open Graph property. + + Args: + key: The Twitter card property (starts with "twitter:"). + + Returns: + The Open Graph property (starts with "og:") or None to have this property + be ignored. + """ + # Twitter card properties with no analogous Open Graph property. + if key == "twitter:card" or key == "twitter:creator": + return None + if key == "twitter:site": + return "og:site_name" + # Otherwise, swap twitter to og. + return "og" + key[7:] + + def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: """ Parse the HTML document into an Open Graph response. @@ -159,10 +232,8 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: The Open Graph response as a dictionary. """ - # if we see any image URLs in the OG response, then spider them - # (although the client could choose to do this by asking for previews of those - # URLs to avoid DoSing the server) - + # Search for Open Graph (og:) meta tags, e.g.: + # # "og:type" : "video", # "og:url" : "https://www.youtube.com/watch?v=LXDBoHyjmtw", # "og:site_name" : "YouTube", @@ -175,19 +246,11 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: # "og:video:height" : "720", # "og:video:secure_url": "https://www.youtube.com/v/LXDBoHyjmtw?version=3", - og: Dict[str, Optional[str]] = {} - for tag in tree.xpath( - "//*/meta[starts-with(@property, 'og:')][@content][not(@content='')]" - ): - # if we've got more than 50 tags, someone is taking the piss - if len(og) >= 50: - logger.warning("Skipping OG for page with too many 'og:' tags") - return {} - - og[tag.attrib["property"]] = tag.attrib["content"] - - # TODO: grab article: meta tags too, e.g.: + og = _get_meta_tags(tree, "property", "og") + # TODO: Search for properties specific to the different Open Graph types, + # such as article: meta tags, e.g.: + # # "article:publisher" : "https://www.facebook.com/thethudonline" /> # "article:author" content="https://www.facebook.com/thethudonline" /> # "article:tag" content="baby" /> @@ -195,6 +258,21 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: # "article:published_time" content="2016-03-31T19:58:24+00:00" /> # "article:modified_time" content="2016-04-01T18:31:53+00:00" /> + # Search for Twitter Card (twitter:) meta tags, e.g.: + # + # "twitter:site" : "@matrixdotorg" + # "twitter:creator" : "@matrixdotorg" + # + # Twitter cards tags also duplicate Open Graph tags. + # + # See https://developer.twitter.com/en/docs/twitter-for-websites/cards/guides/getting-started + twitter = _get_meta_tags(tree, "name", "twitter", _map_twitter_to_open_graph) + # Merge the Twitter values with the Open Graph values, but do not overwrite + # information from Open Graph tags. 
diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/rest/media/v1/preview_html.py
index 5f334f4634..afe4e29758 100644
--- a/synapse/rest/media/v1/preview_html.py
+++ b/synapse/rest/media/v1/preview_html.py
@@ -14,7 +14,16 @@
 import codecs
 import logging
 import re
-from typing import TYPE_CHECKING, Dict, Generator, Iterable, List, Optional, Set, Union
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+    Dict,
+    Generator,
+    Iterable,
+    List, Optional,
+    Set,
+    Union,
+)
 
 if TYPE_CHECKING:
     from lxml import etree
@@ -145,6 +154,70 @@
     return etree.fromstring(body, parser)
 
 
+def _get_meta_tags(
+    tree: "etree.Element",
+    property: str,
+    prefix: str,
+    property_mapper: Optional[Callable[[str], Optional[str]]] = None,
+) -> Dict[str, Optional[str]]:
+    """
+    Search for meta tags prefixed with a particular string.
+
+    Args:
+        tree: The parsed HTML document.
+        property: The name of the property which contains the tag name, e.g.
+            "property" for Open Graph.
+        prefix: The prefix on the property to search for, e.g. "og" for Open Graph.
+        property_mapper: An optional callable to map the property to the Open Graph
+            form. Can return None for a key to ignore that key.
+
+    Returns:
+        A map of tag name to value.
+    """
+    results: Dict[str, Optional[str]] = {}
+    for tag in tree.xpath(
+        f"//*/meta[starts-with(@{property}, '{prefix}:')][@content][not(@content='')]"
+    ):
+        # if we've got more than 50 tags, someone is taking the piss
+        if len(results) >= 50:
+            logger.warning(
+                "Skipping parsing of Open Graph for page with too many '%s:' tags",
+                prefix,
+            )
+            return {}
+
+        key = tag.attrib[property]
+        if property_mapper:
+            key = property_mapper(key)
+            # None is a special value used to ignore a value.
+            if key is None:
+                continue
+
+        results[key] = tag.attrib["content"]
+
+    return results
+
+
+def _map_twitter_to_open_graph(key: str) -> Optional[str]:
+    """
+    Map a Twitter card property to the analogous Open Graph property.
+
+    Args:
+        key: The Twitter card property (starts with "twitter:").
+
+    Returns:
+        The Open Graph property (starts with "og:") or None to have this property
+        be ignored.
+    """
+    # Twitter card properties with no analogous Open Graph property.
+    if key == "twitter:card" or key == "twitter:creator":
+        return None
+    if key == "twitter:site":
+        return "og:site_name"
+    # Otherwise, swap twitter to og.
+    return "og" + key[7:]
+
+
 def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]:
     """
     Parse the HTML document into an Open Graph response.
@@ -159,10 +232,8 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]:
         The Open Graph response as a dictionary.
     """
 
-    # if we see any image URLs in the OG response, then spider them
-    # (although the client could choose to do this by asking for previews of those
-    # URLs to avoid DoSing the server)
-
+    # Search for Open Graph (og:) meta tags, e.g.:
+    #
     # "og:type"            : "video",
     # "og:url"             : "https://www.youtube.com/watch?v=LXDBoHyjmtw",
     # "og:site_name"       : "YouTube",
     # "og:video:type"      : "application/x-shockwave-flash",
     # "og:description"     : "Fun stuff happening here",
     # "og:title"           : "RemoteJam - Matrix team hack for Disrupt Europe Hackathon",
     # "og:image"           : "https://i.ytimg.com/vi/LXDBoHyjmtw/maxresdefault.jpg",
     # "og:video:url"       : "http://www.youtube.com/v/LXDBoHyjmtw?version=3&autohide=1",
     # "og:video:width"     : "1280",
@@ -175,19 +246,11 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]:
     # "og:video:height"    : "720",
     # "og:video:secure_url": "https://www.youtube.com/v/LXDBoHyjmtw?version=3",
 
-    og: Dict[str, Optional[str]] = {}
-    for tag in tree.xpath(
-        "//*/meta[starts-with(@property, 'og:')][@content][not(@content='')]"
-    ):
-        # if we've got more than 50 tags, someone is taking the piss
-        if len(og) >= 50:
-            logger.warning("Skipping OG for page with too many 'og:' tags")
-            return {}
-
-        og[tag.attrib["property"]] = tag.attrib["content"]
-
-    # TODO: grab article: meta tags too, e.g.:
+    og = _get_meta_tags(tree, "property", "og")
+    # TODO: Search for properties specific to the different Open Graph types,
+    # such as article: meta tags, e.g.:
+    #
     # "article:publisher" : "https://www.facebook.com/thethudonline" />
     # "article:author" content="https://www.facebook.com/thethudonline" />
     # "article:tag" content="baby" />
     # "article:section" content="Breaking News" />
@@ -195,6 +258,21 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]:
     # "article:published_time" content="2016-03-31T19:58:24+00:00" />
     # "article:modified_time" content="2016-04-01T18:31:53+00:00" />
 
+    # Search for Twitter Card (twitter:) meta tags, e.g.:
+    #
+    # "twitter:site"    : "@matrixdotorg"
+    # "twitter:creator" : "@matrixdotorg"
+    #
+    # Twitter cards tags also duplicate Open Graph tags.
+    #
+    # See https://developer.twitter.com/en/docs/twitter-for-websites/cards/guides/getting-started
+    twitter = _get_meta_tags(tree, "name", "twitter", _map_twitter_to_open_graph)
+    # Merge the Twitter values with the Open Graph values, but do not overwrite
+    # information from Open Graph tags.
+    for key, value in twitter.items():
+        if key not in og:
+            og[key] = value
+
     if "og:title" not in og:
         # Attempt to find a title from the title tag, or the biggest header on the page.
         title = tree.xpath("((//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1])/text()")
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index 53b1565243..2295adfaa7 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -18,7 +18,11 @@ import logging
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
 
 from synapse.api.errors import SynapseError
-from synapse.http.server import DirectServeJsonResource, set_cors_headers
+from synapse.http.server import (
+    DirectServeJsonResource,
+    set_corp_headers,
+    set_cors_headers,
+)
 from synapse.http.servlet import parse_integer, parse_string
 from synapse.http.site import SynapseRequest
 from synapse.rest.media.v1.media_storage import MediaStorage
@@ -58,6 +62,7 @@ class ThumbnailResource(DirectServeJsonResource):
 
     async def _async_render_GET(self, request: SynapseRequest) -> None:
         set_cors_headers(request)
+        set_corp_headers(request)
         server_name, media_id, _ = parse_media_id(request)
         width = parse_integer(request, "width", required=True)
         height = parse_integer(request, "height", required=True)
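The thumbnail_resource.py hunk mirrors the download_resource.py change. To see what the preview_html.py changes do end to end, here is a small usage sketch of parse_html_to_open_graph (assuming the function is importable from synapse.rest.media.v1.preview_html, as in this tree):

    from lxml import etree

    from synapse.rest.media.v1.preview_html import parse_html_to_open_graph

    html = b'''<html><head>
      <meta name="twitter:card" content="summary">
      <meta name="twitter:site" content="@matrixdotorg">
      <meta name="twitter:description" content="An open network">
      <meta property="og:description" content="Matrix.org">
    </head><body></body></html>'''

    tree = etree.fromstring(html, etree.HTMLParser())
    og = parse_html_to_open_graph(tree)

    # og["og:site_name"] == "@matrixdotorg"  (twitter:site is special-cased)
    # og["og:description"] == "Matrix.org"   (the og: tag is not overwritten)
    # twitter:card contributes nothing: it has no Open Graph analogue

Because Twitter values are merged only when the corresponding og: key is absent, pages that provide both sets of tags keep their Open Graph data intact.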