# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
# Copyright 2020-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
2018-02-01 19:35:18 -05:00
|
|
|
import datetime
|
|
|
|
import errno
|
|
|
|
import fnmatch
|
|
|
|
import itertools
|
|
|
|
import logging
|
|
|
|
import os
|
|
|
|
import re
|
|
|
|
import shutil
|
|
|
|
import sys
|
|
|
|
import traceback
|
2021-01-15 10:57:37 -05:00
|
|
|
from typing import TYPE_CHECKING, Any, Dict, Generator, Iterable, Optional, Union
|
2020-06-16 08:51:47 -04:00
|
|
|
from urllib import parse as urlparse
|
2018-07-09 02:09:20 -04:00
|
|
|
|
2020-07-27 07:50:44 -04:00
|
|
|
import attr
|
2016-01-24 18:47:27 -05:00
|
|
|
|
2019-05-10 13:32:44 -04:00
|
|
|
from twisted.internet.error import DNSLookupError
|
2021-01-15 10:57:37 -05:00
|
|
|
from twisted.web.http import Request
|
2016-04-03 07:56:29 -04:00
|
|
|
|
2018-07-09 02:09:20 -04:00
|
|
|
from synapse.api.errors import Codes, SynapseError
|
2018-12-21 09:56:13 -05:00
|
|
|
from synapse.http.client import SimpleHttpClient
|
2016-04-03 07:56:29 -04:00
|
|
|
from synapse.http.server import (
|
2020-07-03 14:02:19 -04:00
|
|
|
DirectServeJsonResource,
|
2017-11-23 12:52:31 -05:00
|
|
|
respond_with_json,
|
2018-07-09 02:09:20 -04:00
|
|
|
respond_with_json_bytes,
|
2016-04-03 07:56:29 -04:00
|
|
|
)
|
2018-07-13 15:40:14 -04:00
|
|
|
from synapse.http.servlet import parse_integer, parse_string
|
2019-07-03 10:07:04 -04:00
|
|
|
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
2018-07-25 04:41:12 -04:00
|
|
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
2018-11-15 16:55:58 -05:00
|
|
|
from synapse.rest.media.v1._base import get_filename_from_headers
|
2021-01-15 10:57:37 -05:00
|
|
|
from synapse.rest.media.v1.media_storage import MediaStorage
|
2020-08-07 08:02:55 -04:00
|
|
|
from synapse.util import json_encoder
|
2018-08-10 09:50:21 -04:00
|
|
|
from synapse.util.async_helpers import ObservableDeferred
|
2018-07-09 02:09:20 -04:00
|
|
|
from synapse.util.caches.expiringcache import ExpiringCache
|
2018-11-15 16:55:58 -05:00
|
|
|
from synapse.util.stringutils import random_string
|
2018-07-09 02:09:20 -04:00
|
|
|
|
|
|
|
from ._base import FileInfo
|
2016-03-28 22:13:25 -04:00
|
|
|
|
2021-01-15 10:57:37 -05:00
|
|
|
if TYPE_CHECKING:
|
|
|
|
from lxml import etree
|
|
|
|
|
|
|
|
from synapse.app.homeserver import HomeServer
|
|
|
|
from synapse.rest.media.v1.media_repository import MediaRepository
|
|
|
|
|
2016-01-24 18:47:27 -05:00
|
|
|
logger = logging.getLogger(__name__)

# Sniffs a `<meta ... charset=...>` declaration out of raw HTML bytes, so we
# can pick an encoding before parsing the document.
_charset_match = re.compile(br"<\s*meta[^>]*charset\s*=\s*([a-z0-9-]+)", flags=re.I)
# Extracts the `charset` parameter from a Content-Type header value.
_content_type_match = re.compile(r'.*; *charset="?(.*?)"?(;|$)', flags=re.I)

# Maximum lengths of OpenGraph tag names and values; longer entries are
# pruned from the response (see _do_preview).
OG_TAG_NAME_MAXLEN = 50
OG_TAG_VALUE_MAXLEN = 1000

# One hour, in milliseconds.
ONE_HOUR = 60 * 60 * 1000
|
|
|
|
|
|
|
|
# A map of globs to API endpoints.
|
|
|
|
_oembed_globs = {
|
|
|
|
# Twitter.
|
|
|
|
"https://publish.twitter.com/oembed": [
|
|
|
|
"https://twitter.com/*/status/*",
|
|
|
|
"https://*.twitter.com/*/status/*",
|
|
|
|
"https://twitter.com/*/moments/*",
|
|
|
|
"https://*.twitter.com/*/moments/*",
|
|
|
|
# Include the HTTP versions too.
|
|
|
|
"http://twitter.com/*/status/*",
|
|
|
|
"http://*.twitter.com/*/status/*",
|
|
|
|
"http://twitter.com/*/moments/*",
|
|
|
|
"http://*.twitter.com/*/moments/*",
|
|
|
|
],
|
|
|
|
}
|
|
|
|
# Convert the globs to regular expressions.
|
|
|
|
_oembed_patterns = {}
|
|
|
|
for endpoint, globs in _oembed_globs.items():
|
|
|
|
for glob in globs:
|
|
|
|
# Convert the glob into a sane regular expression to match against. The
|
|
|
|
# rules followed will be slightly different for the domain portion vs.
|
|
|
|
# the rest.
|
|
|
|
#
|
|
|
|
# 1. The scheme must be one of HTTP / HTTPS (and have no globs).
|
|
|
|
# 2. The domain can have globs, but we limit it to characters that can
|
|
|
|
# reasonably be a domain part.
|
|
|
|
# TODO: This does not attempt to handle Unicode domain names.
|
|
|
|
# 3. Other parts allow a glob to be any one, or more, characters.
|
|
|
|
results = urlparse.urlparse(glob)
|
|
|
|
|
|
|
|
# Ensure the scheme does not have wildcards (and is a sane scheme).
|
|
|
|
if results.scheme not in {"http", "https"}:
|
|
|
|
raise ValueError("Insecure oEmbed glob scheme: %s" % (results.scheme,))
|
|
|
|
|
|
|
|
pattern = urlparse.urlunparse(
|
|
|
|
[
|
|
|
|
results.scheme,
|
|
|
|
re.escape(results.netloc).replace("\\*", "[a-zA-Z0-9_-]+"),
|
|
|
|
]
|
|
|
|
+ [re.escape(part).replace("\\*", ".+") for part in results[2:]]
|
|
|
|
)
|
|
|
|
_oembed_patterns[re.compile(pattern)] = endpoint
|
|
|
|
|
|
|
|
|
2020-09-14 12:50:06 -04:00
|
|
|
@attr.s(slots=True)
class OEmbedResult:
    # The parsed result of an oEmbed API response.
    # Either HTML content or URL must be provided.
    html = attr.ib(type=Optional[str])
    url = attr.ib(type=Optional[str])
    # Title reported by the oEmbed endpoint, if any.
    title = attr.ib(type=Optional[str])
    # Number of seconds to cache the content.
    cache_age = attr.ib(type=int)
|
|
|
|
|
|
|
|
|
|
|
|
class OEmbedError(Exception):
    """Raised when fetching or parsing an oEmbed response fails."""
|
|
|
|
|
2016-04-03 07:56:29 -04:00
|
|
|
|
2020-07-03 14:02:19 -04:00
|
|
|
class PreviewUrlResource(DirectServeJsonResource):
    """Servlet that generates a preview (OpenGraph-style metadata) for a
    client-supplied URL, with both an in-memory cache and a history-aware
    database cache of generated previews.
    """

    isLeaf = True
|
|
|
|
|
2021-01-15 10:57:37 -05:00
|
|
|
    def __init__(
        self,
        hs: "HomeServer",
        media_repo: "MediaRepository",
        media_storage: MediaStorage,
    ):
        """Wire up config, storage and the HTTP client used for spidering.

        Args:
            hs: The homeserver instance (source of config, clock, auth, store).
            media_repo: Repository used to store downloaded media and generate
                thumbnails.
            media_storage: Backend used to write downloaded files to disk.
        """
        super().__init__()

        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.filepaths = media_repo.filepaths
        self.max_spider_size = hs.config.max_spider_size
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        # Dedicated client so previews follow browser-like redirects and
        # honour the URL-preview IP range white/blacklists.
        self.client = SimpleHttpClient(
            hs,
            treq_args={"browser_like_redirects": True},
            ip_whitelist=hs.config.url_preview_ip_range_whitelist,
            ip_blacklist=hs.config.url_preview_ip_range_blacklist,
            # NOTE(review): env lookups are case-sensitive; lower-case
            # "http_proxy" vs upper-case "HTTPS_PROXY" mirrors common shell
            # conventions — confirm both casings are intended.
            http_proxy=os.getenvb(b"http_proxy"),
            https_proxy=os.getenvb(b"HTTPS_PROXY"),
        )
        self.media_repo = media_repo
        self.primary_base_path = media_repo.primary_base_path
        self.media_storage = media_storage

        # We run the background jobs if we're the instance specified (or no
        # instance is specified, where we assume there is only one instance
        # serving media).
        instance_running_jobs = hs.config.media.media_instance_running_background_jobs
        self._worker_run_media_background_jobs = (
            instance_running_jobs is None
            or instance_running_jobs == hs.get_instance_name()
        )

        self.url_preview_url_blacklist = hs.config.url_preview_url_blacklist
        self.url_preview_accept_language = hs.config.url_preview_accept_language

        # memory cache mapping urls to an ObservableDeferred returning
        # JSON-encoded OG metadata
        self._cache = ExpiringCache(
            cache_name="url_previews",
            clock=self.clock,
            # don't spider URLs more often than once an hour
            expiry_ms=ONE_HOUR,
        )

        # Only the designated media instance runs the periodic cache-expiry
        # job (every 10 seconds).
        if self._worker_run_media_background_jobs:
            self._cleaner_loop = self.clock.looping_call(
                self._start_expire_url_cache_data, 10 * 1000
            )
|
2017-09-28 07:18:06 -04:00
|
|
|
|
2021-01-15 10:57:37 -05:00
|
|
|
    async def _async_render_OPTIONS(self, request: Request) -> None:
        """Answer CORS preflight requests: advertise the supported methods
        and return an empty JSON body."""
        request.setHeader(b"Allow", b"OPTIONS, GET")
        respond_with_json(request, 200, {}, send_cors=True)
|
2017-11-23 12:52:31 -05:00
|
|
|
|
2021-01-15 10:57:37 -05:00
|
|
|
    async def _async_render_GET(self, request: Request) -> None:
        """Handle a preview request: authenticate, apply the URL blacklist,
        then return (possibly cached) OG metadata for the requested URL."""

        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = await self.auth.get_user_by_req(request)
        url = parse_string(request, "url")
        # Optional "ts" query param requests the preview as of a particular
        # timestamp; default to "now".
        if b"ts" in request.args:
            ts = parse_integer(request, "ts")
        else:
            ts = self.clock.time_msec()

        # XXX: we could move this into _do_preview if we wanted.
        # Each blacklist entry maps urlsplit attribute names (e.g. "netloc")
        # to a pattern; an entry blocks the URL only if ALL of its attributes
        # match.
        url_tuple = urlparse.urlsplit(url)
        for entry in self.url_preview_url_blacklist:
            match = True
            for attrib in entry:
                pattern = entry[attrib]
                value = getattr(url_tuple, attrib)
                logger.debug(
                    "Matching attrib '%s' with value '%s' against pattern '%s'",
                    attrib,
                    value,
                    pattern,
                )

                if value is None:
                    match = False
                    continue

                # Patterns starting with "^" are regexes; otherwise shell-style
                # globs (fnmatch).
                if pattern.startswith("^"):
                    if not re.match(pattern, getattr(url_tuple, attrib)):
                        match = False
                        continue
                else:
                    if not fnmatch.fnmatch(getattr(url_tuple, attrib), pattern):
                        match = False
                        continue
            # NOTE(review): the inner loop uses `continue` rather than `break`
            # after a failed attribute; `match` can never become True again,
            # so the remaining iterations are wasted work but harmless.
            if match:
                logger.warning("URL %s blocked by url_blacklist entry %s", url, entry)
                raise SynapseError(
                    403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN
                )

        # the in-memory cache:
        # * ensures that only one request is active at a time
        # * takes load off the DB for the thundering herds
        # * also caches any failures (unlike the DB) so we don't keep
        #   requesting the same endpoint
        observable = self._cache.get(url)

        if not observable:
            download = run_in_background(self._do_preview, url, requester.user, ts)
            observable = ObservableDeferred(download, consumeErrors=True)
            self._cache[url] = observable
        else:
            logger.info("Returning cached response")

        og = await make_deferred_yieldable(observable.observe())
        respond_with_json_bytes(request, 200, og, send_cors=True)
|
|
|
|
|
2020-07-27 14:40:11 -04:00
|
|
|
    async def _do_preview(self, url: str, user: str, ts: int) -> bytes:
        """Check the db, and download the URL and build a preview

        Args:
            url: The URL to preview.
            user: The user requesting the preview.
            ts: The timestamp requested for the preview.

        Returns:
            json-encoded og data
        """
        # check the URL cache in the DB (which will also provide us with
        # historical previews, if we have any)
        cache_result = await self.store.get_url_cache(url, ts)
        if (
            cache_result
            and cache_result["expires_ts"] > ts
            # only reuse cached 2xx responses
            and cache_result["response_code"] / 100 == 2
        ):
            # It may be stored as text in the database, not as bytes (such as
            # PostgreSQL). If so, encode it back before handing it on.
            og = cache_result["og"]
            if isinstance(og, str):
                og = og.encode("utf8")
            return og

        media_info = await self._download_url(url, user)

        logger.debug("got media_info of '%s'", media_info)

        if _is_media(media_info["media_type"]):
            # The URL itself is an image/media file: thumbnail it and build a
            # minimal OG response pointing at the stored copy.
            file_id = media_info["filesystem_id"]
            dims = await self.media_repo._generate_thumbnails(
                None, file_id, file_id, media_info["media_type"], url_cache=True
            )

            og = {
                "og:description": media_info["download_name"],
                "og:image": "mxc://%s/%s"
                % (self.server_name, media_info["filesystem_id"]),
                "og:image:type": media_info["media_type"],
                "matrix:image:size": media_info["media_length"],
            }

            if dims:
                og["og:image:width"] = dims["width"]
                og["og:image:height"] = dims["height"]
            else:
                logger.warning("Couldn't get dims for %s" % url)

            # define our OG response for this media
        elif _is_html(media_info["media_type"]):
            # TODO: somehow stop a big HTML tree from exploding synapse's RAM

            with open(media_info["filename"], "rb") as file:
                body = file.read()

            encoding = None

            # Let's try and figure out if it has an encoding set in a meta tag.
            # Limit it to the first 1kb, since it ought to be in the meta tags
            # at the top.
            match = _charset_match.search(body[:1000])

            # If we find a match, it should take precedence over the
            # Content-Type header, so set it here.
            if match:
                encoding = match.group(1).decode("ascii")

            # If we don't find a match, we'll look at the HTTP Content-Type, and
            # if that doesn't exist, we'll fall back to UTF-8.
            if not encoding:
                content_match = _content_type_match.match(media_info["media_type"])
                encoding = content_match.group(1) if content_match else "utf-8"

            og = decode_and_calc_og(body, media_info["uri"], encoding)

            # pre-cache the image for posterity
            # FIXME: it might be cleaner to use the same flow as the main /preview_url
            # request itself and benefit from the same caching etc. But for now we
            # just rely on the caching on the master request to speed things up.
            if "og:image" in og and og["og:image"]:
                image_info = await self._download_url(
                    _rebase_url(og["og:image"], media_info["uri"]), user
                )

                if _is_media(image_info["media_type"]):
                    # TODO: make sure we don't choke on white-on-transparent images
                    file_id = image_info["filesystem_id"]
                    dims = await self.media_repo._generate_thumbnails(
                        None, file_id, file_id, image_info["media_type"], url_cache=True
                    )
                    if dims:
                        og["og:image:width"] = dims["width"]
                        og["og:image:height"] = dims["height"]
                    else:
                        logger.warning("Couldn't get dims for %s", og["og:image"])

                    # Rewrite the image URL to point at our cached copy.
                    og["og:image"] = "mxc://%s/%s" % (
                        self.server_name,
                        image_info["filesystem_id"],
                    )
                    og["og:image:type"] = image_info["media_type"]
                    og["matrix:image:size"] = image_info["media_length"]
                else:
                    # Referenced "image" wasn't actually media; drop it.
                    del og["og:image"]
        else:
            logger.warning("Failed to find any OG data in %s", url)
            og = {}

        # filter out any stupidly long values
        keys_to_remove = []
        for k, v in og.items():
            # values can be numeric as well as strings, hence the cast to str
            if len(k) > OG_TAG_NAME_MAXLEN or len(str(v)) > OG_TAG_VALUE_MAXLEN:
                logger.warning(
                    "Pruning overlong tag %s from OG data", k[:OG_TAG_NAME_MAXLEN]
                )
                keys_to_remove.append(k)
        for k in keys_to_remove:
            del og[k]

        logger.debug("Calculated OG for %s as %s", url, og)

        jsonog = json_encoder.encode(og)

        # store OG in history-aware DB cache
        await self.store.store_url_cache(
            url,
            media_info["response_code"],
            media_info["etag"],
            media_info["expires"] + media_info["created_ts"],
            jsonog,
            media_info["filesystem_id"],
            media_info["created_ts"],
        )

        return jsonog.encode("utf8")
|
2016-04-08 16:36:48 -04:00
|
|
|
|
2020-07-27 07:50:44 -04:00
|
|
|
def _get_oembed_url(self, url: str) -> Optional[str]:
|
|
|
|
"""
|
|
|
|
Check whether the URL should be downloaded as oEmbed content instead.
|
|
|
|
|
2021-01-26 07:32:17 -05:00
|
|
|
Args:
|
2020-07-27 07:50:44 -04:00
|
|
|
url: The URL to check.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
A URL to use instead or None if the original URL should be used.
|
|
|
|
"""
|
|
|
|
for url_pattern, endpoint in _oembed_patterns.items():
|
|
|
|
if url_pattern.fullmatch(url):
|
|
|
|
return endpoint
|
|
|
|
|
|
|
|
# No match.
|
|
|
|
return None
|
|
|
|
|
|
|
|
    async def _get_oembed_content(self, endpoint: str, url: str) -> OEmbedResult:
        """
        Request content from an oEmbed endpoint.

        Args:
            endpoint: The oEmbed API endpoint.
            url: The URL to pass to the API.

        Returns:
            An object representing the metadata returned.

        Raises:
            OEmbedError if fetching or parsing of the oEmbed information fails.
        """
        try:
            logger.debug("Trying to get oEmbed content for url '%s'", url)
            result = await self.client.get_json(
                endpoint,
                # TODO Specify max height / width.
                # Note that only the JSON format is supported.
                args={"url": url},
            )

            # Ensure there's a version of 1.0.
            if result.get("version") != "1.0":
                raise OEmbedError("Invalid version: %s" % (result.get("version"),))

            oembed_type = result.get("type")

            # Ensure the cache age is None or an int.
            cache_age = result.get("cache_age")
            if cache_age:
                cache_age = int(cache_age)

            oembed_result = OEmbedResult(None, None, result.get("title"), cache_age)

            # HTML content.
            if oembed_type == "rich":
                oembed_result.html = result.get("html")
                return oembed_result

            if oembed_type == "photo":
                oembed_result.url = result.get("url")
                return oembed_result

            # TODO Handle link and video types.

            # Fall back to a thumbnail, if the response provides one.
            if "thumbnail_url" in result:
                oembed_result.url = result.get("thumbnail_url")
                return oembed_result

            raise OEmbedError("Incompatible oEmbed information.")

        except OEmbedError as e:
            # Trap OEmbedErrors first so we can directly re-raise them.
            logger.warning("Error parsing oEmbed metadata from %s: %r", url, e)
            raise

        except Exception as e:
            # Trap any exception and let the code follow as usual.
            # FIXME: pass through 404s and other error messages nicely
            logger.warning("Error downloading oEmbed metadata from %s: %r", url, e)
            raise OEmbedError() from e
|
|
|
|
|
2021-01-15 10:57:37 -05:00
|
|
|
    async def _download_url(self, url: str, user: str) -> Dict[str, Any]:
        """Download a URL (directly, or via its oEmbed endpoint) into the
        media store and record it in the local-media table.

        Args:
            url: The URL to download.
            user: The user requesting the download.

        Returns:
            A dict of metadata about the stored file (media type, length,
            filename, response code, expiry, etag, ...).

        Raises:
            SynapseError: on download failure (DNS failure maps to 502,
                other errors to 500); storage errors are re-raised.
        """
        # TODO: we should probably honour robots.txt... except in practice
        # we're most likely being explicitly triggered by a human rather than a
        # bot, so are we really a robot?

        file_id = datetime.date.today().isoformat() + "_" + random_string(16)

        file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True)

        # If this URL can be accessed via oEmbed, use that instead.
        url_to_download = url  # type: Optional[str]
        oembed_url = self._get_oembed_url(url)
        if oembed_url:
            # The result might be a new URL to download, or it might be HTML content.
            try:
                oembed_result = await self._get_oembed_content(oembed_url, url)
                if oembed_result.url:
                    url_to_download = oembed_result.url
                elif oembed_result.html:
                    # HTML was returned inline: nothing further to download.
                    url_to_download = None
            except OEmbedError:
                # If an error occurs, try doing a normal preview.
                pass

        if url_to_download:
            with self.media_storage.store_into_file(file_info) as (f, fname, finish):
                try:
                    logger.debug("Trying to get preview for url '%s'", url_to_download)
                    length, headers, uri, code = await self.client.get_file(
                        url_to_download,
                        output_stream=f,
                        max_size=self.max_spider_size,
                        headers={"Accept-Language": self.url_preview_accept_language},
                    )
                except SynapseError:
                    # Pass SynapseErrors through directly, so that the servlet
                    # handler will return a SynapseError to the client instead of
                    # blank data or a 500.
                    raise
                except DNSLookupError:
                    # DNS lookup returned no results
                    # Note: This will also be the case if one of the resolved IP
                    # addresses is blacklisted
                    raise SynapseError(
                        502,
                        "DNS resolution failure during URL preview generation",
                        Codes.UNKNOWN,
                    )
                except Exception as e:
                    # FIXME: pass through 404s and other error messages nicely
                    logger.warning("Error downloading %s: %r", url_to_download, e)

                    raise SynapseError(
                        500,
                        "Failed to download content: %s"
                        % (traceback.format_exception_only(sys.exc_info()[0], e),),
                        Codes.UNKNOWN,
                    )
                await finish()

                if b"Content-Type" in headers:
                    media_type = headers[b"Content-Type"][0].decode("ascii")
                else:
                    media_type = "application/octet-stream"

                download_name = get_filename_from_headers(headers)

                # FIXME: we should calculate a proper expiration based on the
                # Cache-Control and Expire headers. But for now, assume 1 hour.
                expires = ONE_HOUR
                etag = (
                    headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None
                )
        else:
            # we can only get here if we did an oembed request and have an oembed_result.html
            assert oembed_result.html is not None
            assert oembed_url is not None

            # Store the inline oEmbed HTML as the "downloaded" file.
            html_bytes = oembed_result.html.encode("utf-8")
            with self.media_storage.store_into_file(file_info) as (f, fname, finish):
                f.write(html_bytes)
                await finish()

            media_type = "text/html"
            download_name = oembed_result.title
            length = len(html_bytes)
            # If a specific cache age was not given, assume 1 hour.
            expires = oembed_result.cache_age or ONE_HOUR
            uri = oembed_url
            code = 200
            etag = None

        try:
            time_now_ms = self.clock.time_msec()

            await self.store.store_local_media(
                media_id=file_id,
                media_type=media_type,
                time_now_ms=time_now_ms,
                upload_name=download_name,
                media_length=length,
                user_id=user,
                url_cache=url,
            )

        except Exception as e:
            logger.error("Error handling downloaded %s: %r", url, e)
            # TODO: we really ought to delete the downloaded file in this
            # case, since we won't have recorded it in the db, and will
            # therefore not expire it.
            raise

        return {
            "media_type": media_type,
            "media_length": length,
            "download_name": download_name,
            "created_ts": time_now_ms,
            "filesystem_id": file_id,
            "filename": fname,
            "uri": uri,
            "response_code": code,
            "expires": expires,
            "etag": etag,
        }
|
2016-01-24 18:47:27 -05:00
|
|
|
|
2018-07-25 04:41:12 -04:00
|
|
|
    def _start_expire_url_cache_data(self):
        # Run the cleanup coroutine as a tracked background process (called
        # from the looping_call set up in __init__).
        return run_as_background_process(
            "expire_url_cache_data", self._expire_url_cache_data
        )
|
|
|
|
|
2021-01-15 10:57:37 -05:00
|
|
|
    async def _expire_url_cache_data(self) -> None:
        """Clean up expired url cache content, media and thumbnails.

        Two passes: first remove url cache entries whose expiry has passed,
        then remove cached media (and thumbnails) older than two days.
        Filesystem removal failures other than "file missing" cause the
        entry to be skipped (and kept in the DB).
        """
        # TODO: Delete from backup media store

        assert self._worker_run_media_background_jobs

        now = self.clock.time_msec()

        logger.debug("Running url preview cache expiry")

        # Don't expire anything while DB background updates are still running
        # (the relevant tables/indices may not be ready).
        if not (await self.store.db_pool.updates.has_completed_background_updates()):
            logger.info("Still running DB updates; skipping expiry")
            return

        # First we delete expired url cache entries
        media_ids = await self.store.get_expired_url_cache(now)

        removed_media = []
        for media_id in media_ids:
            fname = self.filepaths.url_cache_filepath(media_id)
            try:
                os.remove(fname)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warning("Failed to remove media: %r: %s", media_id, e)
                    continue

            removed_media.append(media_id)

            # Best-effort removal of now-empty parent directories.
            try:
                dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except Exception:
                pass

        await self.store.delete_url_cache(removed_media)

        if removed_media:
            logger.info("Deleted %d entries from url cache", len(removed_media))
        else:
            logger.debug("No entries removed from url cache")

        # Now we delete old images associated with the url cache.
        # These may be cached for a bit on the client (i.e., they
        # may have a room open with a preview url thing open).
        # So we wait a couple of days before deleting, just in case.
        expire_before = now - 2 * 24 * ONE_HOUR
        media_ids = await self.store.get_url_cache_media_before(expire_before)

        removed_media = []
        for media_id in media_ids:
            fname = self.filepaths.url_cache_filepath(media_id)
            try:
                os.remove(fname)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warning("Failed to remove media: %r: %s", media_id, e)
                    continue

            # Best-effort removal of now-empty parent directories.
            try:
                dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except Exception:
                pass

            # Also remove the thumbnails generated for this media.
            thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id)
            try:
                shutil.rmtree(thumbnail_dir)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warning("Failed to remove media: %r: %s", media_id, e)
                    continue

            removed_media.append(media_id)

            try:
                dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except Exception:
                pass

        await self.store.delete_url_cache_media(removed_media)

        if removed_media:
            logger.info("Deleted %d media from url cache", len(removed_media))
        else:
            logger.debug("No media removed from url cache")
|
2017-09-28 07:18:06 -04:00
|
|
|
|
2016-01-24 18:47:27 -05:00
|
|
|
|
2021-01-15 10:57:37 -05:00
|
|
|
def decode_and_calc_og(
    body: bytes, media_uri: str, request_encoding: Optional[str] = None
) -> Dict[str, Optional[str]]:
    """
    Calculate metadata for an HTML document.

    This uses lxml to parse the HTML document into the OG response. If errors
    occur during processing of the document, an empty response is returned.

    Args:
        body: The HTML document, as bytes.
        media_uri: The URI used to download the body.
        request_encoding: The character encoding of the body, as a string.

    Returns:
        The OG response as a dictionary.
    """
    # If there's no body, nothing useful is going to be found.
    if not body:
        return {}

    from lxml import etree

    # Create an HTML parser. If this fails, log and return no metadata.
    try:
        parser = etree.HTMLParser(recover=True, encoding=request_encoding)
    except LookupError:
        # blindly consider the encoding as utf-8.
        parser = etree.HTMLParser(recover=True, encoding="utf-8")
    except Exception as e:
        # Use lazy %-args so the message is only formatted if emitted.
        logger.warning("Unable to create HTML parser: %s", e)
        return {}

    def _attempt_calc_og(body_attempt: Union[bytes, str]) -> Dict[str, Optional[str]]:
        # Attempt to parse the body. If this fails, log and return no metadata.
        tree = etree.fromstring(body_attempt, parser)
        return _calc_og(tree, media_uri)

    # Attempt to parse the body. If this fails, log and return no metadata.
    try:
        return _attempt_calc_og(body)
    except UnicodeDecodeError:
        # blindly try decoding the body as utf-8, which seems to fix
        # the charset mismatches on https://google.com
        return _attempt_calc_og(body.decode("utf-8", "ignore"))
|
2016-08-16 09:53:18 -04:00
|
|
|
|
|
|
|
|
2021-01-26 07:32:17 -05:00
|
|
|
def _calc_og(tree: "etree.Element", media_uri: str) -> Dict[str, Optional[str]]:
    """Extract an Open Graph response from a parsed HTML document.

    First collects any ``<meta property="og:...">`` tags, then fills in
    missing ``og:title``, ``og:image`` and ``og:description`` keys by
    spidering the document itself (title/headings, ``<img>`` tags, the
    meta description, and finally the page's visible body text).

    Args:
        tree: The parsed HTML document, as an lxml element tree.
        media_uri: The URI the document was downloaded from, used to
            absolutise relative image URLs.

    Returns:
        A dict mapping ``og:*`` property names to their values; values may
        be None (e.g. ``og:title`` when no title could be found).
    """
    # suck our tree into lxml and define our OG response.

    # if we see any image URLs in the OG response, then spider them
    # (although the client could choose to do this by asking for previews of those
    # URLs to avoid DoSing the server)

    # Example of the OG metadata this function produces:
    # "og:type" : "video",
    # "og:url" : "https://www.youtube.com/watch?v=LXDBoHyjmtw",
    # "og:site_name" : "YouTube",
    # "og:video:type" : "application/x-shockwave-flash",
    # "og:description" : "Fun stuff happening here",
    # "og:title" : "RemoteJam - Matrix team hack for Disrupt Europe Hackathon",
    # "og:image" : "https://i.ytimg.com/vi/LXDBoHyjmtw/maxresdefault.jpg",
    # "og:video:url" : "http://www.youtube.com/v/LXDBoHyjmtw?version=3&autohide=1",
    # "og:video:width" : "1280"
    # "og:video:height" : "720",
    # "og:video:secure_url": "https://www.youtube.com/v/LXDBoHyjmtw?version=3",

    og = {}  # type: Dict[str, Optional[str]]
    for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
        if "content" in tag.attrib:
            # if we've got more than 50 tags, someone is taking the piss
            if len(og) >= 50:
                logger.warning("Skipping OG for page with too many 'og:' tags")
                return {}
            og[tag.attrib["property"]] = tag.attrib["content"]

    # TODO: grab article: meta tags too, e.g.:

    # "article:publisher" : "https://www.facebook.com/thethudonline" />
    # "article:author" content="https://www.facebook.com/thethudonline" />
    # "article:tag" content="baby" />
    # "article:section" content="Breaking News" />
    # "article:published_time" content="2016-03-31T19:58:24+00:00" />
    # "article:modified_time" content="2016-04-01T18:31:53+00:00" />

    if "og:title" not in og:
        # do some basic spidering of the HTML
        title = tree.xpath("(//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1]")
        if title and title[0].text is not None:
            og["og:title"] = title[0].text.strip()
        else:
            og["og:title"] = None

    if "og:image" not in og:
        # TODO: extract a favicon failing all else
        # Case-insensitive match on itemprop="image" (translate() lowercases).
        meta_image = tree.xpath(
            "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image']/@content"
        )
        if meta_image:
            og["og:image"] = _rebase_url(meta_image[0], media_uri)
        else:
            # TODO: consider inlined CSS styles as well as width & height attribs
            images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]")
            # Prefer the largest image by declared area.
            images = sorted(
                images,
                key=lambda i: (
                    -1 * float(i.attrib["width"]) * float(i.attrib["height"])
                ),
            )
            if not images:
                images = tree.xpath("//img[@src]")
            if images:
                og["og:image"] = images[0].attrib["src"]

    if "og:description" not in og:
        # Case-insensitive match on name="description".
        meta_description = tree.xpath(
            "//*/meta"
            "[translate(@name, 'DESCRIPTION', 'description')='description']"
            "/@content"
        )
        if meta_description:
            og["og:description"] = meta_description[0]
        else:
            # grab any text nodes which are inside the <body/> tag...
            # unless they are within an HTML5 semantic markup tag...
            # <header/>, <nav/>, <aside/>, <footer/>
            # ...or if they are within a <script/> or <style/> tag.
            # This is a very very very coarse approximation to a plain text
            # render of the page.

            # We don't just use XPATH here as that is slow on some machines.

            from lxml import etree

            TAGS_TO_REMOVE = (
                "header",
                "nav",
                "aside",
                "footer",
                "script",
                "noscript",
                "style",
                etree.Comment,
            )

            # Split all the text nodes into paragraphs (by splitting on new
            # lines)
            text_nodes = (
                re.sub(r"\s+", "\n", el).strip()
                for el in _iterate_over_text(tree.find("body"), *TAGS_TO_REMOVE)
            )
            og["og:description"] = summarize_paragraphs(text_nodes)
    elif og["og:description"]:
        # This must be a non-empty string at this point.
        assert isinstance(og["og:description"], str)
        og["og:description"] = summarize_paragraphs([og["og:description"]])

    # TODO: delete the url downloads to stop diskfilling,
    # as we only ever cared about its OG
    return og
|
|
|
|
|
|
|
|
|
2021-01-15 10:57:37 -05:00
|
|
|
def _iterate_over_text(
|
|
|
|
tree, *tags_to_ignore: Iterable[Union[str, "etree.Comment"]]
|
|
|
|
) -> Generator[str, None, None]:
|
2016-08-16 09:53:18 -04:00
|
|
|
"""Iterate over the tree returning text nodes in a depth first fashion,
|
|
|
|
skipping text nodes inside certain tags.
|
|
|
|
"""
|
|
|
|
# This is basically a stack that we extend using itertools.chain.
|
|
|
|
# This will either consist of an element to iterate over *or* a string
|
|
|
|
# to be returned.
|
|
|
|
elements = iter([tree])
|
|
|
|
while True:
|
2018-10-17 11:09:34 -04:00
|
|
|
el = next(elements, None)
|
|
|
|
if el is None:
|
|
|
|
return
|
|
|
|
|
2020-06-16 08:51:47 -04:00
|
|
|
if isinstance(el, str):
|
2016-08-16 09:53:18 -04:00
|
|
|
yield el
|
2018-10-17 11:09:34 -04:00
|
|
|
elif el.tag not in tags_to_ignore:
|
2016-08-16 09:53:18 -04:00
|
|
|
# el.text is the text before the first child, so we can immediately
|
|
|
|
# return it if the text exists.
|
|
|
|
if el.text:
|
|
|
|
yield el.text
|
|
|
|
|
|
|
|
# We add to the stack all the elements children, interspersed with
|
|
|
|
# each child's tail text (if it exists). The tail text of a node
|
|
|
|
# is text that comes *after* the node, so we always include it even
|
|
|
|
# if we ignore the child node.
|
|
|
|
elements = itertools.chain(
|
|
|
|
itertools.chain.from_iterable( # Basically a flatmap
|
|
|
|
[child, child.tail] if child.tail else [child]
|
|
|
|
for child in el.iterchildren()
|
|
|
|
),
|
2019-06-20 05:32:02 -04:00
|
|
|
elements,
|
2016-08-16 09:53:18 -04:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2021-01-15 10:57:37 -05:00
|
|
|
def _rebase_url(url: str, base: str) -> str:
|
|
|
|
base_parts = list(urlparse.urlparse(base))
|
|
|
|
url_parts = list(urlparse.urlparse(url))
|
|
|
|
if not url_parts[0]: # fix up schema
|
|
|
|
url_parts[0] = base_parts[0] or "http"
|
|
|
|
if not url_parts[1]: # fix up hostname
|
|
|
|
url_parts[1] = base_parts[1]
|
|
|
|
if not url_parts[2].startswith("/"):
|
|
|
|
url_parts[2] = re.sub(r"/[^/]+$", "/", base_parts[2]) + url_parts[2]
|
|
|
|
return urlparse.urlunparse(url_parts)
|
2016-08-16 09:53:18 -04:00
|
|
|
|
|
|
|
|
2021-01-15 10:57:37 -05:00
|
|
|
def _is_media(content_type: str) -> bool:
|
|
|
|
return content_type.lower().startswith("image/")
|
2016-08-16 09:53:18 -04:00
|
|
|
|
|
|
|
|
2021-01-15 10:57:37 -05:00
|
|
|
def _is_html(content_type: str) -> bool:
|
2016-08-16 09:53:18 -04:00
|
|
|
content_type = content_type.lower()
|
2021-01-15 10:57:37 -05:00
|
|
|
return content_type.startswith("text/html") or content_type.startswith(
|
2019-06-20 05:32:02 -04:00
|
|
|
"application/xhtml"
|
2021-01-15 10:57:37 -05:00
|
|
|
)
|
2016-08-04 11:08:32 -04:00
|
|
|
|
|
|
|
|
2021-01-15 10:57:37 -05:00
|
|
|
def summarize_paragraphs(
    text_nodes: Iterable[str], min_size: int = 200, max_size: int = 500
) -> Optional[str]:
    """Compress a sequence of paragraphs into a short summary string.

    Paragraphs are concatenated (separated by blank lines) until at least
    *min_size* characters have been collected; if that overshoots
    *max_size*, the text is trimmed back on word boundaries.
    TODO: Respect sentences?

    Returns:
        The summary, or None if there was no text at all.
    """
    # Keep adding paragraphs until we reach min_size characters.
    summary = ""
    for node in text_nodes:
        if len(summary) >= min_size:
            break
        # Collapse internal whitespace runs within the paragraph.
        summary += re.sub(r"[\t \r\n]+", " ", node) + "\n\n"

    # Normalise whitespace: single spaces within lines, blank lines between
    # paragraphs.
    summary = summary.strip()
    summary = re.sub(r"[\t ]+", " ", summary)
    summary = re.sub(r"[\t \r\n]*[\r\n]+", "\n\n", summary)

    # If the concatenation of paragraphs to reach min_size took us past
    # max_size, truncate mid paragraph on a word boundary.
    if len(summary) > max_size:
        truncated = ""

        # Split into words while keeping each word's preceding whitespace,
        # so the pieces can simply be concatenated back together.
        for piece in re.finditer(r"\s*\S+", summary):
            word = piece.group()

            # Keep adding words while we stay under max_size.
            if len(word) + len(truncated) < max_size:
                truncated += word
                continue

            # The next word would take us over max_size. If we are still
            # under min_size it may just be one huge token: include it
            # anyway and rely on the hard cut below.
            if len(truncated) < min_size:
                truncated += word
            break

        # Hard cap in case an oversized word slipped through above.
        if len(truncated) > max_size:
            truncated = truncated[:max_size]

        # Always append an ellipsis: at the very least we chopped
        # mid-paragraph.
        summary = truncated.strip() + "…"

    return summary if summary else None
|