# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.web.server import NOT_DONE_YET
from twisted.internet import defer
from twisted.web.resource import Resource

from synapse.api.errors import (
    SynapseError, Codes,
)
from synapse.util.stringutils import random_string
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.http.client import SpiderHttpClient
from synapse.http.server import (
    request_handler, respond_with_json_bytes
)
from synapse.util.async import ObservableDeferred
from synapse.util.stringutils import is_ascii

import os
import re
import fnmatch
import cgi
import ujson as json
import urlparse

import logging
logger = logging.getLogger(__name__)


class PreviewUrlResource(Resource):
    isLeaf = True

    def __init__(self, hs, media_repo):
        Resource.__init__(self)
        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.version_string = hs.version_string
        self.filepaths = media_repo.filepaths
        self.max_spider_size = hs.config.max_spider_size
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.client = SpiderHttpClient(hs)
        self.media_repo = media_repo

        if hasattr(hs.config, "url_preview_url_blacklist"):
            self.url_preview_url_blacklist = hs.config.url_preview_url_blacklist

        # simple memory cache mapping urls to OG metadata
        self.cache = ExpiringCache(
            cache_name="url_previews",
            clock=self.clock,
            # don't spider URLs more often than once an hour
            expiry_ms=60 * 60 * 1000,
        )
        self.cache.start()

        self.downloads = {}

    def render_GET(self, request):
        self._async_render_GET(request)
        return NOT_DONE_YET

    @request_handler
    @defer.inlineCallbacks
    def _async_render_GET(self, request):
        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = yield self.auth.get_user_by_req(request)
        url = request.args.get("url")[0]
        if "ts" in request.args:
            ts = int(request.args.get("ts")[0])
        else:
            ts = self.clock.time_msec()
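        # Each entry in url_preview_url_blacklist is a dict mapping
        # urlparse.urlsplit() attribute names (scheme, netloc, path, ...) to
        # patterns; a URL is blocked when every attrib in any one entry
        # matches.  Hypothetical example entries (not from a real config):
        #
        #   {'netloc': '*.example.com'}             # glob, matched via fnmatch
        #   {'scheme': 'http', 'netloc': '^192\.'}  # '^'-prefixed => regex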
        # impose the URL pattern blacklist
        if hasattr(self, "url_preview_url_blacklist"):
            url_tuple = urlparse.urlsplit(url)
            for entry in self.url_preview_url_blacklist:
                match = True
                for attrib in entry:
                    pattern = entry[attrib]
                    value = getattr(url_tuple, attrib)
                    logger.debug((
                        "Matching attrib '%s' with value '%s' against"
                        " pattern '%s'"
                    ) % (attrib, value, pattern))

                    if value is None:
                        match = False
                        continue

                    if pattern.startswith('^'):
                        if not re.match(pattern, getattr(url_tuple, attrib)):
                            match = False
                            continue
                    else:
                        if not fnmatch.fnmatch(getattr(url_tuple, attrib), pattern):
                            match = False
                            continue
                if match:
                    logger.warn(
                        "URL %s blocked by url_blacklist entry %s", url, entry
                    )
                    raise SynapseError(
                        403, "URL blocked by url pattern blacklist entry",
                        Codes.UNKNOWN
                    )

        # first check the memory cache - good to handle all the clients on
        # this HS thundering away to preview the same URL at the same time.
        og = self.cache.get(url)
        if og:
            respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)
            return

        # then check the URL cache in the DB (which will also provide us with
        # historical previews, if we have any)
        cache_result = yield self.store.get_url_cache(url, ts)
        if (
            cache_result and
            cache_result["download_ts"] + cache_result["expires"] > ts and
            cache_result["response_code"] / 100 == 2
        ):
            respond_with_json_bytes(
                request, 200, cache_result["og"].encode('utf-8'),
                send_cors=True
            )
            return

        # Ensure only one download for a given URL is active at a time
        download = self.downloads.get(url)
        if download is None:
            download = self._download_url(url, requester.user)
            download = ObservableDeferred(
                download,
                consumeErrors=True
            )
            self.downloads[url] = download

            @download.addBoth
            def callback(media_info):
                del self.downloads[url]
                return media_info
        media_info = yield download.observe()

        # FIXME: we should probably update our cache now anyway, so that
        # even if the OG calculation raises, we don't keep hammering on the
        # remote server.  For now, leave it uncached to aid debugging OG
        # calculation problems.

        logger.debug("got media_info of '%s'" % media_info)

        if self._is_media(media_info['media_type']):
            dims = yield self.media_repo._generate_local_thumbnails(
                media_info['filesystem_id'], media_info
            )

            # define our OG response for this media
            og = {
                "og:description": media_info['download_name'],
                "og:image": "mxc://%s/%s" % (
                    self.server_name, media_info['filesystem_id']
                ),
                "og:image:type": media_info['media_type'],
                "matrix:image:size": media_info['media_length'],
            }

            if dims:
                og["og:image:width"] = dims['width']
                og["og:image:height"] = dims['height']
            else:
                logger.warn("Couldn't get dims for %s" % url)
        elif self._is_html(media_info['media_type']):
            # TODO: somehow stop a big HTML tree from exploding synapse's RAM

            from lxml import etree

            with open(media_info['filename']) as f:
                body = f.read()

            # clobber the encoding from the content-type, or default to utf-8
            # XXX: this overrides any <meta/> or XML charset headers in the
            # body, which may pose problems, but so far seems to work okay.
            match = re.match(
                r'.*; *charset=(.*?)(;|$)', media_info['media_type'], re.I
            )
            encoding = match.group(1) if match else "utf-8"

            try:
                parser = etree.HTMLParser(recover=True, encoding=encoding)
                tree = etree.fromstring(body, parser)
                og = yield self._calc_og(tree, media_info, requester)
            except UnicodeDecodeError:
                # blindly try decoding the body as utf-8, which seems to fix
                # the charset mismatches on https://google.com
                parser = etree.HTMLParser(recover=True, encoding=encoding)
                tree = etree.fromstring(body.decode('utf-8', 'ignore'), parser)
                og = yield self._calc_og(tree, media_info, requester)
        else:
            logger.warn("Failed to find any OG data in %s", url)
            og = {}

        logger.debug("Calculated OG for %s as %s" % (url, og))

        # store OG in ephemeral in-memory cache
        self.cache[url] = og

        # store OG in history-aware DB cache
        yield self.store.store_url_cache(
            url,
            media_info["response_code"],
            media_info["etag"],
            media_info["expires"],
            json.dumps(og),
            media_info["filesystem_id"],
            media_info["created_ts"],
        )

        respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)
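    # _calc_og scrapes OpenGraph metadata out of the parsed HTML tree,
    # falling back to basic heuristics (first <title>/<h1>, largest <img>,
    # <meta name="description">) where the page lacks og: tags.  A
    # hypothetical example of the dict it produces (illustrative values only):
    #
    #   {
    #       "og:title": "Some page title",
    #       "og:description": "The first paragraph of the page...",
    #       "og:image": "mxc://example.com/someMediaId",
    #       "og:image:width": 1280,
    #       "og:image:height": 720,
    #   }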
    @defer.inlineCallbacks
    def _calc_og(self, tree, media_info, requester):
        # suck our tree into lxml and define our OG response.

        # if we see any image URLs in the OG response, then spider them
        # (although the client could choose to do this by asking for previews
        # of those URLs to avoid DoSing the server)

        # "og:type"            : "video",
        # "og:url"             : "https://www.youtube.com/watch?v=LXDBoHyjmtw",
        # "og:site_name"       : "YouTube",
        # "og:video:type"      : "application/x-shockwave-flash",
        # "og:description"     : "Fun stuff happening here",
        # "og:title"           : "RemoteJam - Matrix team hack for Disrupt Europe Hackathon",
        # "og:image"           : "https://i.ytimg.com/vi/LXDBoHyjmtw/maxresdefault.jpg",
        # "og:video:url"       : "http://www.youtube.com/v/LXDBoHyjmtw?version=3&autohide=1",
        # "og:video:width"     : "1280",
        # "og:video:height"    : "720",
        # "og:video:secure_url": "https://www.youtube.com/v/LXDBoHyjmtw?version=3",

        og = {}
        for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
            og[tag.attrib['property']] = tag.attrib['content']

        # TODO: grab article: meta tags too, e.g.:
        #
        # "article:publisher"      content="https://www.facebook.com/thethudonline" />
        # "article:author"         content="https://www.facebook.com/thethudonline" />
        # "article:tag"            content="baby" />
        # "article:section"        content="Breaking News" />
        # "article:published_time" content="2016-03-31T19:58:24+00:00" />
        # "article:modified_time"  content="2016-04-01T18:31:53+00:00" />

        if 'og:title' not in og:
            # do some basic spidering of the HTML
            title = tree.xpath("(//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1]")
            og['og:title'] = title[0].text.strip() if title else None

        if 'og:image' not in og:
            # TODO: extract a favicon failing all else
            meta_image = tree.xpath(
                "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image']/@content"
            )
            if meta_image:
                og['og:image'] = self._rebase_url(meta_image[0], media_info['uri'])
            else:
                # TODO: consider inlined CSS styles as well as width & height attribs
                images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]")
                images = sorted(images, key=lambda i: (
                    -1 * int(i.attrib['width']) * int(i.attrib['height'])
                ))
                if not images:
                    images = tree.xpath("//img[@src]")
                if images:
                    og['og:image'] = images[0].attrib['src']

        # pre-cache the image for posterity
        # FIXME: it might be cleaner to use the same flow as the main
        # /preview_url request itself and benefit from the same caching etc.
        # But for now we just rely on the caching on the master request to
        # speed things up.
        if 'og:image' in og and og['og:image']:
            image_info = yield self._download_url(
                self._rebase_url(og['og:image'], media_info['uri']),
                requester.user
            )

            if self._is_media(image_info['media_type']):
                # TODO: make sure we don't choke on white-on-transparent images
                dims = yield self.media_repo._generate_local_thumbnails(
                    image_info['filesystem_id'], image_info
                )
                if dims:
                    og["og:image:width"] = dims['width']
                    og["og:image:height"] = dims['height']
                else:
                    logger.warn("Couldn't get dims for %s" % og["og:image"])

                og["og:image"] = "mxc://%s/%s" % (
                    self.server_name, image_info['filesystem_id']
                )
                og["og:image:type"] = image_info['media_type']
                og["matrix:image:size"] = image_info['media_length']
            else:
                del og["og:image"]

        if 'og:description' not in og:
            meta_description = tree.xpath(
                "//*/meta"
                "[translate(@name, 'DESCRIPTION', 'description')='description']"
                "/@content")
            if meta_description:
                og['og:description'] = meta_description[0]
            else:
                # grab any text nodes which are inside the <body/> tag...
                # unless they are within an HTML5 semantic markup tag...
                # <header/>, <nav/>, <aside/>, <footer/>
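                # A minimal sketch of the fallback described above, assuming
                # lxml's XPath support (illustrative only, not the exact code):
                #
                #   text_nodes = tree.xpath(
                #       "//text()[not(ancestor::header | ancestor::nav | "
                #       "ancestor::aside | ancestor::footer)]"
                #   )
                #   og['og:description'] = ' '.join(
                #       s.strip() for s in text_nodes if s.strip()
                #   )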