Mirror of https://git.anonymousland.org/anonymousland/synapse.git (synced 2025-08-17 20:40:16 -04:00)

Merge remote-tracking branch 'upstream/release-v1.46'

Commit cf45cfd314: 172 changed files with 5549 additions and 2350 deletions
@@ -110,7 +110,7 @@ class DevicesRestServlet(RestServlet):
     def __init__(self, hs: "HomeServer"):
         """
         Args:
-            hs (synapse.server.HomeServer): server
+            hs: server
         """
         self.hs = hs
         self.auth = hs.get_auth()
@@ -40,7 +40,7 @@ class QuarantineMediaInRoom(RestServlet):
     """

     PATTERNS = [
-        *admin_patterns("/room/(?P<room_id>[^/]+)/media/quarantine"),
+        *admin_patterns("/room/(?P<room_id>[^/]+)/media/quarantine$"),
         # This path kept around for legacy reasons
         *admin_patterns("/quarantine_media/(?P<room_id>[^/]+)"),
     ]
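Note: this hunk, and several of the admin hunks below, only add a trailing `$` to the servlet URL regexes. Without the anchor, `re.match` succeeds as soon as the pattern matches a prefix of the request path, so a path with a stray suffix would still route to the handler. A minimal sketch with plain `re` (the `/_synapse/admin/v1` prefix is an assumption about what the `admin_patterns` helper prepends, used here purely for illustration):

    import re

    unanchored = re.compile("^/_synapse/admin/v1/purge_media_cache")
    anchored = re.compile("^/_synapse/admin/v1/purge_media_cache$")

    path = "/_synapse/admin/v1/purge_media_cache_typo"

    print(bool(unanchored.match(path)))  # True: the pattern matches a prefix of the path
    print(bool(anchored.match(path)))    # False: $ requires the path to end here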
@@ -70,7 +70,7 @@ class QuarantineMediaByUser(RestServlet):
     this server.
     """

-    PATTERNS = admin_patterns("/user/(?P<user_id>[^/]+)/media/quarantine")
+    PATTERNS = admin_patterns("/user/(?P<user_id>[^/]+)/media/quarantine$")

     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastore()
@@ -199,7 +199,7 @@ class UnprotectMediaByID(RestServlet):
 class ListMediaInRoom(RestServlet):
     """Lists all of the media in a given room."""

-    PATTERNS = admin_patterns("/room/(?P<room_id>[^/]+)/media")
+    PATTERNS = admin_patterns("/room/(?P<room_id>[^/]+)/media$")

     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastore()
@@ -219,7 +219,7 @@ class ListMediaInRoom(RestServlet):


 class PurgeMediaCacheRestServlet(RestServlet):
-    PATTERNS = admin_patterns("/purge_media_cache")
+    PATTERNS = admin_patterns("/purge_media_cache$")

     def __init__(self, hs: "HomeServer"):
         self.media_repository = hs.get_media_repository()
@@ -231,6 +231,20 @@ class PurgeMediaCacheRestServlet(RestServlet):
         before_ts = parse_integer(request, "before_ts", required=True)
         logger.info("before_ts: %r", before_ts)

+        if before_ts < 0:
+            raise SynapseError(
+                400,
+                "Query parameter before_ts must be a positive integer.",
+                errcode=Codes.INVALID_PARAM,
+            )
+        elif before_ts < 30000000000:  # Dec 1970 in milliseconds, Aug 2920 in seconds
+            raise SynapseError(
+                400,
+                "Query parameter before_ts you provided is from the year 1970. "
+                + "Double check that you are providing a timestamp in milliseconds.",
+                errcode=Codes.INVALID_PARAM,
+            )
+
         ret = await self.media_repository.delete_old_remote_media(before_ts)

         return 200, ret
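Note: the block added here (and mirrored in DeleteMediaByDateSize below) catches a common operator mistake: `before_ts` must be milliseconds since the epoch, and anything under 30000000000 lands in December 1970 when read that way, so it was almost certainly supplied in seconds. A quick sketch of building a valid value, using only the stdlib (how the request itself is sent is left out):

    import time

    # The admin API expects milliseconds since the epoch.
    before_ts_ms = int(time.time() * 1000)

    # The same moment expressed in seconds would now be rejected as "from 1970".
    before_ts_s = int(time.time())
    assert before_ts_s < 30_000_000_000 < before_ts_ms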
@@ -271,7 +285,7 @@ class DeleteMediaByDateSize(RestServlet):
     timestamp and size.
     """

-    PATTERNS = admin_patterns("/media/(?P<server_name>[^/]+)/delete")
+    PATTERNS = admin_patterns("/media/(?P<server_name>[^/]+)/delete$")

     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastore()
@@ -291,7 +305,14 @@ class DeleteMediaByDateSize(RestServlet):
         if before_ts < 0:
             raise SynapseError(
                 400,
-                "Query parameter before_ts must be a string representing a positive integer.",
+                "Query parameter before_ts must be a positive integer.",
                 errcode=Codes.INVALID_PARAM,
             )
+        elif before_ts < 30000000000:  # Dec 1970 in milliseconds, Aug 2920 in seconds
+            raise SynapseError(
+                400,
+                "Query parameter before_ts you provided is from the year 1970. "
+                + "Double check that you are providing a timestamp in milliseconds.",
+                errcode=Codes.INVALID_PARAM,
+            )
         if size_gt < 0:
@@ -35,6 +35,7 @@ from synapse.rest.admin._base import (
     assert_user_is_admin,
 )
 from synapse.rest.client._base import client_patterns
+from synapse.storage.databases.main.registration import ExternalIDReuseException
 from synapse.storage.databases.main.stats import UserSortOrder
 from synapse.types import JsonDict, UserID

@@ -228,12 +229,12 @@ class UserRestServletV2(RestServlet):
             if not isinstance(deactivate, bool):
                 raise SynapseError(400, "'deactivated' parameter is not of type boolean")

-        # convert List[Dict[str, str]] into Set[Tuple[str, str]]
+        # convert List[Dict[str, str]] into List[Tuple[str, str]]
         if external_ids is not None:
-            new_external_ids = {
+            new_external_ids = [
                 (external_id["auth_provider"], external_id["external_id"])
                 for external_id in external_ids
-            }
+            ]

         # convert List[Dict[str, str]] into Set[Tuple[str, str]]
         if threepids is not None:
@@ -275,28 +276,13 @@ class UserRestServletV2(RestServlet):
                     )

             if external_ids is not None:
-                # get changed external_ids (added and removed)
-                cur_external_ids = set(
-                    await self.store.get_external_ids_by_user(user_id)
-                )
-                add_external_ids = new_external_ids - cur_external_ids
-                del_external_ids = cur_external_ids - new_external_ids
-
-                # remove old external_ids
-                for auth_provider, external_id in del_external_ids:
-                    await self.store.remove_user_external_id(
-                        auth_provider,
-                        external_id,
-                        user_id,
-                    )
-
-                # add new external_ids
-                for auth_provider, external_id in add_external_ids:
-                    await self.store.record_user_external_id(
-                        auth_provider,
-                        external_id,
+                try:
+                    await self.store.replace_user_external_id(
+                        new_external_ids,
                         user_id,
                     )
+                except ExternalIDReuseException:
+                    raise SynapseError(409, "External id is already in use.")

             if "avatar_url" in body and isinstance(body["avatar_url"], str):
                 await self.profile_handler.set_avatar_url(
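Note: the old read-diff-write sequence issued separate remove/record calls per external ID, so a concurrent change or a uniqueness conflict could leave the user's IDs half-updated. The new `replace_user_external_id` pushes the whole swap into the storage layer, which can apply it atomically and report conflicts as `ExternalIDReuseException`. A rough sketch of the idea using plain sqlite3 (not Synapse's storage layer; the table, column names and helper are made up for illustration):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE user_external_ids ("
        "auth_provider TEXT, external_id TEXT, user_id TEXT, "
        "UNIQUE (auth_provider, external_id))"
    )

    def replace_user_external_id(ids, user_id):
        # Delete-then-insert inside a single transaction: either the whole new
        # set is stored, or (on a uniqueness conflict) nothing changes at all.
        try:
            with conn:
                conn.execute(
                    "DELETE FROM user_external_ids WHERE user_id = ?", (user_id,)
                )
                conn.executemany(
                    "INSERT INTO user_external_ids VALUES (?, ?, ?)",
                    [(provider, ext_id, user_id) for provider, ext_id in ids],
                )
        except sqlite3.IntegrityError:
            raise RuntimeError("External id is already in use.")

    replace_user_external_id([("oidc", "alice")], "@alice:example.com")
    try:
        replace_user_external_id([("oidc", "alice")], "@bob:example.com")
    except RuntimeError as err:
        print(err)  # External id is already in use.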
@@ -340,6 +326,9 @@ class UserRestServletV2(RestServlet):
                     target_user.to_string()
                 )

+            if "user_type" in body:
+                await self.store.set_user_type(target_user, user_type)
+
             user = await self.admin_handler.get_user(target_user)
             assert user is not None

@@ -384,12 +373,15 @@ class UserRestServletV2(RestServlet):
                     )

             if external_ids is not None:
-                for auth_provider, external_id in new_external_ids:
-                    await self.store.record_user_external_id(
-                        auth_provider,
-                        external_id,
-                        user_id,
-                    )
+                try:
+                    for auth_provider, external_id in new_external_ids:
+                        await self.store.record_user_external_id(
+                            auth_provider,
+                            external_id,
+                            user_id,
+                        )
+                except ExternalIDReuseException:
+                    raise SynapseError(409, "External id is already in use.")

             if "avatar_url" in body and isinstance(body["avatar_url"], str):
                 await self.profile_handler.set_avatar_url(
@@ -128,9 +128,10 @@ class RelationSendServlet(RestServlet):

         content["m.relates_to"] = {
             "event_id": parent_id,
-            "key": aggregation_key,
             "rel_type": relation_type,
         }
+        if aggregation_key is not None:
+            content["m.relates_to"]["key"] = aggregation_key

         event_dict = {
             "type": event_type,
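Note: making `key` conditional matters because only some relation types carry an aggregation key. An `m.annotation` (a reaction) aggregates by its key, while an `m.reference` has none, and unconditionally serialising `"key": aggregation_key` would write a null key into the event content. Roughly the two content shapes this endpoint now produces (illustrative values):

    # An annotation (reaction): the aggregation key is what gets counted.
    annotation = {
        "m.relates_to": {
            "event_id": "$parent_event_id",
            "rel_type": "m.annotation",
            "key": "👍",
        }
    }

    # A reference: no aggregation key, so the "key" field is simply absent.
    reference = {
        "m.relates_to": {
            "event_id": "$parent_event_id",
            "rel_type": "m.reference",
        }
    }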
@@ -232,12 +233,12 @@ class RelationPaginationServlet(RestServlet):
         # Similarly, we don't allow relations to be applied to relations, so we
         # return the original relations without any aggregations on top of them
         # here.
-        events = await self._event_serializer.serialize_events(
+        serialized_events = await self._event_serializer.serialize_events(
            events, now, bundle_aggregations=False
         )

         return_value = pagination_chunk.to_dict()
-        return_value["chunk"] = events
+        return_value["chunk"] = serialized_events
         return_value["original_event"] = original_event

         return 200, return_value
@@ -416,10 +417,10 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
         )

         now = self.clock.time_msec()
-        events = await self._event_serializer.serialize_events(events, now)
+        serialized_events = await self._event_serializer.serialize_events(events, now)

         return_value = result.to_dict()
-        return_value["chunk"] = events
+        return_value["chunk"] = serialized_events

         return 200, return_value

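Note: the rename to `serialized_events` here and in the hunk above likely exists so each variable keeps a single type once the module is type-checked: `events` holds event objects, while the serializer returns JSON-ready dicts. A generic illustration of the complaint a rebinding like the old code triggers (not Synapse code):

    from typing import Dict, List

    def serialize(events: List[int]) -> List[Dict[str, int]]:
        return [{"value": e} for e in events]

    events = [1, 2, 3]
    # mypy rejects this rebinding: the expression is List[Dict[str, int]]
    # but `events` was inferred as List[int].
    events = serialize(events)

    # A fresh name keeps both variables at a single, stable type.
    serialized_events = serialize([1, 2, 3])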
@@ -32,7 +32,6 @@ from synapse.http.servlet import (
 from synapse.http.site import SynapseRequest
-from synapse.rest.client.transactions import HttpTransactionCache
 from synapse.types import JsonDict
 from synapse.util.stringutils import random_string

 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -164,11 +163,6 @@ class RoomBatchSendEventRestServlet(RestServlet):
         base_insertion_event = None
         if batch_id_from_query:
             batch_id_to_connect_to = batch_id_from_query
-            # All but the first base insertion event should point at a fake
-            # event, which causes the HS to ask for the state at the start of
-            # the batch later.
-            fake_prev_event_id = "$" + random_string(43)
-            prev_event_ids = [fake_prev_event_id]
         # Otherwise, create an insertion event to act as a starting point.
         #
         # We don't always have an insertion event to start hanging more history
@@ -177,8 +171,6 @@ class RoomBatchSendEventRestServlet(RestServlet):
         # an insertion event), in which case we just create a new insertion event
         # that can then get pointed to by a "marker" event later.
         else:
-            prev_event_ids = prev_event_ids_from_query
-
             base_insertion_event_dict = (
                 self.room_batch_handler.create_insertion_event_dict(
                     sender=requester.user.to_string(),
@@ -186,7 +178,7 @@ class RoomBatchSendEventRestServlet(RestServlet):
                     origin_server_ts=last_event_in_batch["origin_server_ts"],
                 )
             )
-            base_insertion_event_dict["prev_events"] = prev_event_ids.copy()
+            base_insertion_event_dict["prev_events"] = prev_event_ids_from_query.copy()

             (
                 base_insertion_event,
@@ -207,6 +199,11 @@ class RoomBatchSendEventRestServlet(RestServlet):
                 EventContentFields.MSC2716_NEXT_BATCH_ID
             ]

+        # Also connect the historical event chain to the end of the floating
+        # state chain, which causes the HS to ask for the state at the start of
+        # the batch later.
+        prev_event_ids = [state_event_ids_at_start[-1]]
+
         # Create and persist all of the historical events as well as insertion
         # and batch meta events to make the batch navigable in the DAG.
         event_ids, next_batch_id = await self.room_batch_handler.handle_batch_of_events(
@@ -16,12 +16,15 @@
 import functools
 import os
 import re
-from typing import Any, Callable, List
+from typing import Any, Callable, List, TypeVar, cast

 NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d")


-def _wrap_in_base_path(func: Callable[..., str]) -> Callable[..., str]:
+F = TypeVar("F", bound=Callable[..., str])
+
+
+def _wrap_in_base_path(func: F) -> F:
     """Takes a function that returns a relative path and turns it into an
     absolute path based on the location of the primary media store
     """
@@ -31,7 +34,7 @@ def _wrap_in_base_path(func: Callable[..., str]) -> Callable[..., str]:
         path = func(self, *args, **kwargs)
         return os.path.join(self.base_path, path)

-    return _wrapped
+    return cast(F, _wrapped)


 class MediaFilePaths:
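Note: typing the decorator as `Callable[..., str] -> Callable[..., str]` throws away the wrapped function's real parameter list; binding a `TypeVar` to the callable and casting the wrapper back to it lets a type checker keep the original signature on the wrapped path helpers. A standalone sketch of the same pattern (not the Synapse class, just the typing trick; names are illustrative):

    import functools
    import os
    from typing import Any, Callable, TypeVar, cast

    F = TypeVar("F", bound=Callable[..., str])

    def wrap_in_base_path(func: F) -> F:
        """Turn a function returning a relative path into one returning an absolute path."""

        @functools.wraps(func)
        def wrapped(self: Any, *args: Any, **kwargs: Any) -> str:
            return os.path.join(self.base_path, func(self, *args, **kwargs))

        # mypy only sees `wrapped` as Callable[..., str]; casting back to F
        # preserves the decorated method's exact parameter list for callers.
        return cast(F, wrapped)

    class Paths:
        def __init__(self, base_path: str) -> None:
            self.base_path = base_path

        @wrap_in_base_path
        def local_media(self, media_id: str) -> str:
            return os.path.join("local_content", media_id)

    print(Paths("/data/media_store").local_media("abc123"))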
@@ -45,23 +48,6 @@ class MediaFilePaths:
     def __init__(self, primary_base_path: str):
         self.base_path = primary_base_path

-    def default_thumbnail_rel(
-        self,
-        default_top_level: str,
-        default_sub_type: str,
-        width: int,
-        height: int,
-        content_type: str,
-        method: str,
-    ) -> str:
-        top_level_type, sub_type = content_type.split("/")
-        file_name = "%i-%i-%s-%s-%s" % (width, height, top_level_type, sub_type, method)
-        return os.path.join(
-            "default_thumbnails", default_top_level, default_sub_type, file_name
-        )
-
-    default_thumbnail = _wrap_in_base_path(default_thumbnail_rel)
-
     def local_media_filepath_rel(self, media_id: str) -> str:
         return os.path.join("local_content", media_id[0:2], media_id[2:4], media_id[4:])

@@ -17,7 +17,6 @@ from typing import TYPE_CHECKING, List, Optional

 import attr

-from synapse.http.client import SimpleHttpClient
 from synapse.types import JsonDict
 from synapse.util import json_decoder

@@ -48,7 +47,7 @@ class OEmbedProvider:
     requesting/parsing oEmbed content.
     """

-    def __init__(self, hs: "HomeServer", client: SimpleHttpClient):
+    def __init__(self, hs: "HomeServer"):
         self._oembed_patterns = {}
         for oembed_endpoint in hs.config.oembed.oembed_patterns:
             api_endpoint = oembed_endpoint.api_endpoint
@@ -69,7 +68,6 @@ class OEmbedProvider:
             # Iterate through each URL pattern and point it to the endpoint.
             for pattern in oembed_endpoint.url_patterns:
                 self._oembed_patterns[pattern] = api_endpoint
-        self._client = client

     def get_oembed_url(self, url: str) -> Optional[str]:
         """
@@ -139,10 +137,11 @@ class OEmbedProvider:
             # oEmbed responses *must* be UTF-8 according to the spec.
             oembed = json_decoder.decode(raw_body.decode("utf-8"))

-            # Ensure there's a version of 1.0.
-            oembed_version = oembed["version"]
-            if oembed_version != "1.0":
-                raise RuntimeError(f"Invalid version: {oembed_version}")
+            # The version is a required string field, but not always provided,
+            # or sometimes provided as a float. Be lenient.
+            oembed_version = oembed.get("version", "1.0")
+            if oembed_version != "1.0" and oembed_version != 1:
+                raise RuntimeError(f"Invalid oEmbed version: {oembed_version}")

             # Ensure the cache age is None or an int.
             cache_age = oembed.get("cache_age")
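Note: the oEmbed spec says `version` is a required string equal to "1.0", but real providers omit it or emit it as a JSON number, so the strict `oembed["version"]` lookup rejected otherwise usable responses. The relaxed check defaults the field and tolerates the numeric form. A quick check of which payloads now pass (sample payloads, not real provider data):

    def version_ok(oembed: dict) -> bool:
        # Mirrors the relaxed check: a missing version defaults to "1.0",
        # and a numeric 1 (e.g. JSON 1.0 parsed as a float) is tolerated.
        version = oembed.get("version", "1.0")
        return version == "1.0" or version == 1

    print(version_ok({"version": "1.0", "type": "rich"}))  # True
    print(version_ok({"type": "photo"}))                   # True (version omitted)
    print(version_ok({"version": 1.0}))                    # True: 1.0 == 1 in Python
    print(version_ok({"version": "2.0"}))                  # False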
@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import codecs
 import datetime
 import errno
 import fnmatch
@@ -22,7 +23,7 @@ import re
 import shutil
 import sys
 import traceback
-from typing import TYPE_CHECKING, Dict, Generator, Iterable, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Dict, Generator, Iterable, Optional, Set, Tuple, Union
 from urllib import parse as urlparse

 import attr
@@ -140,7 +141,7 @@ class PreviewUrlResource(DirectServeJsonResource):
         self.primary_base_path = media_repo.primary_base_path
         self.media_storage = media_storage

-        self._oembed = OEmbedProvider(hs, self.client)
+        self._oembed = OEmbedProvider(hs)

         # We run the background jobs if we're the instance specified (or no
         # instance is specified, where we assume there is only one instance
@@ -295,8 +296,7 @@ class PreviewUrlResource(DirectServeJsonResource):
             with open(media_info.filename, "rb") as file:
                 body = file.read()

-            encoding = get_html_media_encoding(body, media_info.media_type)
-            tree = decode_body(body, encoding)
+            tree = decode_body(body, media_info.uri, media_info.media_type)
             if tree is not None:
                 # Check if this HTML document points to oEmbed information and
                 # defer to that.
@@ -632,16 +632,27 @@ class PreviewUrlResource(DirectServeJsonResource):
             logger.debug("No media removed from url cache")


-def get_html_media_encoding(body: bytes, content_type: str) -> str:
+def _normalise_encoding(encoding: str) -> Optional[str]:
+    """Use the Python codec's name as the normalised entry."""
+    try:
+        return codecs.lookup(encoding).name
+    except LookupError:
+        return None
+
+
+def get_html_media_encodings(body: bytes, content_type: Optional[str]) -> Iterable[str]:
     """
-    Get the encoding of the body based on the (presumably) HTML body or media_type.
+    Get potential encoding of the body based on the (presumably) HTML body or the content-type header.

     The precedence used for finding a character encoding is:

-    1. meta tag with a charset declared.
+    1. <meta> tag with a charset declared.
     2. The XML document's character encoding attribute.
     3. The Content-Type header.
-    4. Fallback to UTF-8.
+    4. Fallback to utf-8.
+    5. Fallback to windows-1252.

+    This roughly follows the algorithm used by BeautifulSoup's bs4.dammit.EncodingDetector.
+
     Args:
         body: The HTML document, as bytes.
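Note: `_normalise_encoding` leans on the stdlib codec registry so different spellings of the same charset collapse to one entry in `attempted_encodings`, and unknown labels are dropped rather than failing later in `bytes.decode`. For example, this is plain stdlib behaviour, independent of Synapse:

    import codecs

    for label in ("UTF8", "utf-8", "latin-1", "ISO-8859-1", "not-a-real-charset"):
        try:
            print(label, "->", codecs.lookup(label).name)
        except LookupError:
            print(label, "-> rejected")

    # UTF8 / utf-8 both normalise to "utf-8", and latin-1 / ISO-8859-1 both to
    # "iso8859-1", so each underlying codec is only tried once on the page body.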
@@ -650,39 +661,55 @@ def get_html_media_encoding(body: bytes, content_type: str) -> str:
     Returns:
         The character encoding of the body, as a string.
     """
+    # There's no point in returning an encoding more than once.
+    attempted_encodings: Set[str] = set()
+
     # Limit searches to the first 1kb, since it ought to be at the top.
     body_start = body[:1024]

-    # Let's try and figure out if it has an encoding set in a meta tag.
+    # Check if it has an encoding set in a meta tag.
     match = _charset_match.search(body_start)
     if match:
-        return match.group(1).decode("ascii")
+        encoding = _normalise_encoding(match.group(1).decode("ascii"))
+        if encoding:
+            attempted_encodings.add(encoding)
+            yield encoding

     # TODO Support <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>

-    # If we didn't find a match, see if it an XML document with an encoding.
+    # Check if it has an XML document with an encoding.
     match = _xml_encoding_match.match(body_start)
     if match:
-        return match.group(1).decode("ascii")
+        encoding = _normalise_encoding(match.group(1).decode("ascii"))
+        if encoding and encoding not in attempted_encodings:
+            attempted_encodings.add(encoding)
+            yield encoding

-    # If we don't find a match, we'll look at the HTTP Content-Type, and
-    # if that doesn't exist, we'll fall back to UTF-8.
-    content_match = _content_type_match.match(content_type)
-    if content_match:
-        return content_match.group(1)
+    # Check the HTTP Content-Type header for a character set.
+    if content_type:
+        content_match = _content_type_match.match(content_type)
+        if content_match:
+            encoding = _normalise_encoding(content_match.group(1))
+            if encoding and encoding not in attempted_encodings:
+                attempted_encodings.add(encoding)
+                yield encoding

-    return "utf-8"
+    # Finally, fallback to UTF-8, then windows-1252.
+    for fallback in ("utf-8", "cp1252"):
+        if fallback not in attempted_encodings:
+            yield fallback


 def decode_body(
-    body: bytes, request_encoding: Optional[str] = None
+    body: bytes, uri: str, content_type: Optional[str] = None
 ) -> Optional["etree.Element"]:
     """
     This uses lxml to parse the HTML document.

     Args:
         body: The HTML document, as bytes.
-        request_encoding: The character encoding of the body, as a string.
+        uri: The URI used to download the body.
+        content_type: The Content-Type header.

     Returns:
         The parsed HTML body, or None if an error occurred during processed.
@@ -691,32 +718,25 @@ def decode_body(
     if not body:
         return None

-    from lxml import etree
-
-    # Create an HTML parser. If this fails, log and return no metadata.
-    try:
-        parser = etree.HTMLParser(recover=True, encoding=request_encoding)
-    except LookupError:
-        # blindly consider the encoding as utf-8.
-        parser = etree.HTMLParser(recover=True, encoding="utf-8")
-    except Exception as e:
-        logger.warning("Unable to create HTML parser: %s" % (e,))
+    for encoding in get_html_media_encodings(body, content_type):
+        try:
+            body_str = body.decode(encoding)
+        except Exception:
+            pass
+        else:
+            break
+    else:
+        logger.warning("Unable to decode HTML body for %s", uri)
         return None

-    def _attempt_decode_body(
-        body_attempt: Union[bytes, str]
-    ) -> Optional["etree.Element"]:
-        # Attempt to parse the body. Returns None if the body was successfully
-        # parsed, but no tree was found.
-        return etree.fromstring(body_attempt, parser)
+    from lxml import etree

-    # Attempt to parse the body. If this fails, log and return no metadata.
-    try:
-        return _attempt_decode_body(body)
-    except UnicodeDecodeError:
-        # blindly try decoding the body as utf-8, which seems to fix
-        # the charset mismatches on https://google.com
-        return _attempt_decode_body(body.decode("utf-8", "ignore"))
+    # Create an HTML parser.
+    parser = etree.HTMLParser(recover=True, encoding="utf-8")
+
+    # Attempt to parse the body. Returns None if the body was successfully
+    # parsed, but no tree was found.
+    return etree.fromstring(body_str, parser)


 def _calc_og(tree: "etree.Element", media_uri: str) -> Dict[str, Optional[str]]:
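Note: the rewritten `decode_body` also relies on Python's `for`/`else`: the `else` arm runs only when the loop completes without hitting `break`, i.e. when no candidate encoding could decode the body. A stripped-down version of that control flow (hard-coded candidate list instead of the real generator):

    def first_decodable(body: bytes, candidates=("utf-8", "cp1252")) -> str:
        for encoding in candidates:
            try:
                body_str = body.decode(encoding)
            except Exception:
                pass
            else:
                break  # success: skip the for-else below
        else:
            # Only reached if every candidate failed to decode.
            raise ValueError("Unable to decode body")
        return body_str

    print(first_decodable("caf\u00e9".encode("utf-8")))  # café, via utf-8
    print(first_decodable(b"\x93smart quotes\x94"))      # via the cp1252 fallback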