author     Patrick Cloke <clokep@users.noreply.github.com>  2021-10-14 10:17:20 -0400
committer  GitHub <noreply@github.com>  2021-10-14 10:17:20 -0400
commit     e2f0b49b3fa9fd87cd24ac6bdc46a94db532ba89 (patch)
tree       e3548ea528e308ec9fda883abb1a74a9af23ff05 /synapse/rest/media/v1
parent     Fix-up some type hints in the relations tests. (#11076) (diff)
download   synapse-e2f0b49b3fa9fd87cd24ac6bdc46a94db532ba89.tar.xz
Attempt different character encodings when previewing a URL. (#11077)
This follows logic similar to BeautifulSoup's: we attempt different
character encodings until we find one that works.
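
In outline, the decode step now works like this (a minimal sketch, not the
code from this commit; decode_with_candidates is an illustrative name, and
the candidates would come from the new get_html_media_encodings generator
in the diff below):

    # Try candidate encodings in order; keep the first that decodes cleanly.
    def decode_with_candidates(body: bytes, candidates) -> str:
        for encoding in candidates:
            try:
                return body.decode(encoding)
            except (LookupError, UnicodeDecodeError):
                continue  # unknown or failing encoding: try the next one
        raise ValueError("no candidate encoding could decode the body")

    # e.g. b"caf\xe9" fails as utf-8 but decodes as windows-1252 ("café"):
    # decode_with_candidates(b"caf\xe9", ["utf-8", "windows-1252"])
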
Diffstat (limited to 'synapse/rest/media/v1')
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py | 80
1 file changed, 39 insertions(+), 41 deletions(-)
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 5bddd21ef1..7ee91a0c05 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -295,8 +295,7 @@ class PreviewUrlResource(DirectServeJsonResource):
             with open(media_info.filename, "rb") as file:
                 body = file.read()
 
-            encoding = get_html_media_encoding(body, media_info.media_type)
-            tree = decode_body(body, encoding)
+            tree = decode_body(body, media_info.uri, media_info.media_type)
             if tree is not None:
                 # Check if this HTML document points to oEmbed information and
                 # defer to that.
@@ -632,16 +631,19 @@ class PreviewUrlResource(DirectServeJsonResource):
             logger.debug("No media removed from url cache")
 
 
-def get_html_media_encoding(body: bytes, content_type: str) -> str:
+def get_html_media_encodings(body: bytes, content_type: Optional[str]) -> Iterable[str]:
     """
-    Get the encoding of the body based on the (presumably) HTML body or media_type.
+    Get potential encodings of the body based on the (presumably) HTML body or the Content-Type header.
 
     The precedence used for finding a character encoding is:
 
-    1. meta tag with a charset declared.
+    1. <meta> tag with a charset declared.
     2. The XML document's character encoding attribute.
     3. The Content-Type header.
-    4. Fallback to UTF-8.
+    4. Fall back to utf-8.
+    5. Fall back to windows-1252.
+
+    This roughly follows the algorithm used by BeautifulSoup's bs4.dammit.EncodingDetector.
 
     Args:
         body: The HTML document, as bytes.
@@ -653,36 +655,39 @@ def get_html_media_encoding(body: bytes, content_type: str) -> str:
     # Limit searches to the first 1kb, since it ought to be at the top.
     body_start = body[:1024]
 
-    # Let's try and figure out if it has an encoding set in a meta tag.
+    # Check if it has an encoding set in a meta tag.
     match = _charset_match.search(body_start)
     if match:
-        return match.group(1).decode("ascii")
+        yield match.group(1).decode("ascii")
 
     # TODO Support <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
 
-    # If we didn't find a match, see if it an XML document with an encoding.
+    # Check if it is an XML document with a declared encoding.
     match = _xml_encoding_match.match(body_start)
     if match:
-        return match.group(1).decode("ascii")
+        yield match.group(1).decode("ascii")
 
-    # If we don't find a match, we'll look at the HTTP Content-Type, and
-    # if that doesn't exist, we'll fall back to UTF-8.
-    content_match = _content_type_match.match(content_type)
-    if content_match:
-        return content_match.group(1)
+    # Check the HTTP Content-Type header for a character set.
+    if content_type:
+        content_match = _content_type_match.match(content_type)
+        if content_match:
+            yield content_match.group(1)
 
-    return "utf-8"
+    # Finally, fall back to UTF-8, then windows-1252.
+    yield "utf-8"
+    yield "windows-1252"
 
 
 def decode_body(
-    body: bytes, request_encoding: Optional[str] = None
+    body: bytes, uri: str, content_type: Optional[str] = None
 ) -> Optional["etree.Element"]:
     """
     This uses lxml to parse the HTML document.
 
     Args:
         body: The HTML document, as bytes.
-        request_encoding: The character encoding of the body, as a string.
+        uri: The URI used to download the body.
+        content_type: The Content-Type header.
 
     Returns:
         The parsed HTML body, or None if an error occurred during processing.
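
For illustration, the candidate order the new generator should yield for a
few hypothetical inputs (assuming the _charset_match, _xml_encoding_match,
and _content_type_match regexes from this file match as expected):

    # <meta charset="iso-8859-1"> in the body
    #     -> "iso-8859-1", "utf-8", "windows-1252"
    # <?xml version="1.0" encoding="iso-2022-jp"?> and no meta tag
    #     -> "iso-2022-jp", "utf-8", "windows-1252"
    # no in-document hints, Content-Type: text/html; charset=koi8-r
    #     -> "koi8-r", "utf-8", "windows-1252"
    # no hints anywhere
    #     -> "utf-8", "windows-1252"
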
@@ -691,32 +696,25 @@ def decode_body(
     if not body:
         return None
 
+    for encoding in get_html_media_encodings(body, content_type):
+        try:
+            body_str = body.decode(encoding)
+        except Exception:
+            pass
+        else:
+            break
+    else:
+        logger.warning("Unable to decode HTML body for %s", uri)
+        return None
+
     from lxml import etree
 
-    # Create an HTML parser. If this fails, log and return no metadata.
-    try:
-        parser = etree.HTMLParser(recover=True, encoding=request_encoding)
-    except LookupError:
-        # blindly consider the encoding as utf-8.
-        parser = etree.HTMLParser(recover=True, encoding="utf-8")
-    except Exception as e:
-        logger.warning("Unable to create HTML parser: %s" % (e,))
-        return None
+    # Create an HTML parser.
+    parser = etree.HTMLParser(recover=True, encoding="utf-8")
 
-    def _attempt_decode_body(
-        body_attempt: Union[bytes, str]
-    ) -> Optional["etree.Element"]:
-        # Attempt to parse the body. Returns None if the body was successfully
-        # parsed, but no tree was found.
-        return etree.fromstring(body_attempt, parser)
-
-    # Attempt to parse the body. If this fails, log and return no metadata.
-    try:
-        return _attempt_decode_body(body)
-    except UnicodeDecodeError:
-        # blindly try decoding the body as utf-8, which seems to fix
-        # the charset mismatches on https://google.com
-        return _attempt_decode_body(body.decode("utf-8", "ignore"))
+    # Attempt to parse the body. Returns None if the body was successfully
+    # parsed, but no tree was found.
+    return etree.fromstring(body_str, parser)
 
 
 def _calc_og(tree: "etree.Element", media_uri: str) -> Dict[str, Optional[str]]:
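
A note on the decode loop in the last hunk: it relies on Python's for/else,
where the else clause runs only if the loop finished without hitting a
break. A standalone sketch of the same pattern (illustrative names, not
Synapse code):

    def first_decodable(body: bytes, encodings) -> str:
        for encoding in encodings:
            try:
                text = body.decode(encoding)
            except Exception:
                pass  # this candidate failed; try the next one
            else:
                break  # success: leave the loop, skipping its else clause
        else:
            # The loop never hit `break`: every candidate failed.
            raise ValueError("unable to decode body")
        return text

In decode_body, that else branch is what logs the warning and returns None
before lxml is ever invoked.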