Attempt different character encodings when previewing a URL. (#11077)
This follows logic similar to BeautifulSoup's: we attempt different character encodings until we find one that works.
parent 1609ccf8fe
commit e2f0b49b3f

3 changed files with 80 additions and 67 deletions
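In outline, the patch decodes the body by trying candidate encodings in order and keeping the first that works, using Python's for/else (the else arm runs only when the loop finishes without a break). A minimal sketch of that pattern, with a hypothetical first_decodable helper that is not part of the patch:

    # Minimal sketch of the try-each-encoding pattern; first_decodable is a
    # hypothetical helper, not part of this commit.
    from typing import List

    def first_decodable(body: bytes, encodings: List[str]) -> str:
        for encoding in encodings:
            try:
                text = body.decode(encoding)
            except Exception:
                # Covers UnicodeDecodeError and LookupError (unknown codec name).
                pass
            else:
                break
        else:
            raise ValueError("no candidate encoding worked")
        return text

    # b"h\xe9llo" is invalid UTF-8, so the loop falls through to windows-1252.
    print(first_decodable(b"h\xe9llo", ["utf-8", "windows-1252"]))  # héllo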
@@ -295,8 +295,7 @@ class PreviewUrlResource(DirectServeJsonResource):
         with open(media_info.filename, "rb") as file:
             body = file.read()

-        encoding = get_html_media_encoding(body, media_info.media_type)
-        tree = decode_body(body, encoding)
+        tree = decode_body(body, media_info.uri, media_info.media_type)
         if tree is not None:
             # Check if this HTML document points to oEmbed information and
             # defer to that.
@@ -632,16 +631,19 @@ class PreviewUrlResource(DirectServeJsonResource):
             logger.debug("No media removed from url cache")


-def get_html_media_encoding(body: bytes, content_type: str) -> str:
+def get_html_media_encodings(body: bytes, content_type: Optional[str]) -> Iterable[str]:
     """
-    Get the encoding of the body based on the (presumably) HTML body or media_type.
+    Get potential encoding of the body based on the (presumably) HTML body or the content-type header.

     The precedence used for finding a character encoding is:

-    1. meta tag with a charset declared.
+    1. <meta> tag with a charset declared.
     2. The XML document's character encoding attribute.
     3. The Content-Type header.
-    4. Fallback to UTF-8.
+    4. Fallback to utf-8.
+    5. Fallback to windows-1252.
+
+    This roughly follows the algorithm used by BeautifulSoup's bs4.dammit.EncodingDetector.

     Args:
         body: The HTML document, as bytes.
@@ -653,36 +655,39 @@ def get_html_media_encoding(body: bytes, content_type: str) -> str:
     # Limit searches to the first 1kb, since it ought to be at the top.
     body_start = body[:1024]

-    # Let's try and figure out if it has an encoding set in a meta tag.
+    # Check if it has an encoding set in a meta tag.
     match = _charset_match.search(body_start)
     if match:
-        return match.group(1).decode("ascii")
+        yield match.group(1).decode("ascii")

     # TODO Support <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>

-    # If we didn't find a match, see if it an XML document with an encoding.
+    # Check if it has an XML document with an encoding.
     match = _xml_encoding_match.match(body_start)
     if match:
-        return match.group(1).decode("ascii")
+        yield match.group(1).decode("ascii")

-    # If we don't find a match, we'll look at the HTTP Content-Type, and
-    # if that doesn't exist, we'll fall back to UTF-8.
-    content_match = _content_type_match.match(content_type)
-    if content_match:
-        return content_match.group(1)
+    # Check the HTTP Content-Type header for a character set.
+    if content_type:
+        content_match = _content_type_match.match(content_type)
+        if content_match:
+            yield content_match.group(1)

-    return "utf-8"
+    # Finally, fallback to UTF-8, then windows-1252.
+    yield "utf-8"
+    yield "windows-1252"


 def decode_body(
-    body: bytes, request_encoding: Optional[str] = None
+    body: bytes, uri: str, content_type: Optional[str] = None
 ) -> Optional["etree.Element"]:
     """
     This uses lxml to parse the HTML document.

     Args:
         body: The HTML document, as bytes.
-        request_encoding: The character encoding of the body, as a string.
+        uri: The URI used to download the body.
+        content_type: The Content-Type header.

     Returns:
         The parsed HTML body, or None if an error occurred during processed.
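To illustrate the generator above: candidates are yielded lazily and in precedence order, so a caller can stop at the first one that decodes. A self-contained sketch, using a simplified stand-in for _charset_match (the real module-level regexes live elsewhere in the file and do not appear in this diff; candidate_encodings is an illustrative name, not the patch's):

    import re
    from typing import Iterable, Optional

    # Simplified stand-in for the module's _charset_match regex.
    _charset_match = re.compile(rb'<meta[^>]*charset\s*=\s*"?([a-zA-Z0-9_-]+)', re.I)

    def candidate_encodings(body: bytes, content_type: Optional[str]) -> Iterable[str]:
        # Only search the start of the document, as the real function does.
        match = _charset_match.search(body[:1024])
        if match:
            yield match.group(1).decode("ascii")
        yield "utf-8"
        yield "windows-1252"

    body = '<meta charset="shift_jis"><p>こんにちは</p>'.encode("shift_jis")
    print(list(candidate_encodings(body, "text/html")))
    # ['shift_jis', 'utf-8', 'windows-1252']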
@@ -691,32 +696,25 @@ def decode_body(
     if not body:
         return None

-    from lxml import etree
-
-    # Create an HTML parser. If this fails, log and return no metadata.
-    try:
-        parser = etree.HTMLParser(recover=True, encoding=request_encoding)
-    except LookupError:
-        # blindly consider the encoding as utf-8.
-        parser = etree.HTMLParser(recover=True, encoding="utf-8")
-    except Exception as e:
-        logger.warning("Unable to create HTML parser: %s" % (e,))
+    for encoding in get_html_media_encodings(body, content_type):
+        try:
+            body_str = body.decode(encoding)
+        except Exception:
+            pass
+        else:
+            break
+    else:
+        logger.warning("Unable to decode HTML body for %s", uri)
         return None

-    def _attempt_decode_body(
-        body_attempt: Union[bytes, str]
-    ) -> Optional["etree.Element"]:
-        # Attempt to parse the body. Returns None if the body was successfully
-        # parsed, but no tree was found.
-        return etree.fromstring(body_attempt, parser)
+    from lxml import etree

-    # Attempt to parse the body. If this fails, log and return no metadata.
-    try:
-        return _attempt_decode_body(body)
-    except UnicodeDecodeError:
-        # blindly try decoding the body as utf-8, which seems to fix
-        # the charset mismatches on https://google.com
-        return _attempt_decode_body(body.decode("utf-8", "ignore"))
+    # Create an HTML parser.
+    parser = etree.HTMLParser(recover=True, encoding="utf-8")
+
+    # Attempt to parse the body. Returns None if the body was successfully
+    # parsed, but no tree was found.
+    return etree.fromstring(body_str, parser)


 def _calc_og(tree: "etree.Element", media_uri: str) -> Dict[str, Optional[str]]:
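For reference, the final parse step leans on lxml's recovering HTML parser, which builds a tree even from malformed markup. A small standalone demonstration with made-up markup (assumes lxml is installed):

    from lxml import etree

    # recover=True makes lxml repair malformed HTML rather than fail; this
    # mirrors the parser constructed in decode_body above.
    parser = etree.HTMLParser(recover=True, encoding="utf-8")

    # Unclosed tags still produce a usable tree.
    tree = etree.fromstring("<html><head><title>Hi</title></head><p>unclosed", parser)
    print(tree.findtext(".//title"))  # Hi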