#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2019 The Matrix.org Foundation C.I.C.
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#
import logging
import random
import time
from io import BytesIO
from typing import Callable, Dict, Optional, Tuple

import attr

from twisted.internet import defer
from twisted.internet.interfaces import IReactorTime
from twisted.web.client import RedirectAgent
from twisted.web.http import stringToDatetime
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IResponse

from synapse.http.client import BodyExceededMaxSize, read_body_with_max_size
from synapse.logging.context import make_deferred_yieldable
from synapse.util import Clock, json_decoder
from synapse.util.caches.ttlcache import TTLCache
from synapse.util.metrics import Measure

# period to cache .well-known results for by default
WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600

# jitter factor to add to the .well-known default cache ttls
WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 0.1

# period to cache failure to fetch .well-known for
WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600

# period to cache failure to fetch .well-known if there has recently been a
# valid well-known for that domain.
WELL_KNOWN_DOWN_CACHE_PERIOD = 2 * 60

# period to remember there was a valid well-known after valid record expires
WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID = 2 * 3600

# cap for .well-known cache period
WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600

# lower bound for .well-known cache period
WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60

# The maximum size (in bytes) to allow a well-known file to be.
WELL_KNOWN_MAX_SIZE = 50 * 1024  # 50 KiB

# Attempt to refetch a cached well-known N% of the TTL before it expires.
# e.g. if set to 0.2 and we have a cached entry with a TTL of 5mins, then
# we'll start trying to refetch 1 minute before it expires.
WELL_KNOWN_GRACE_PERIOD_FACTOR = 0.2

# Number of times we retry fetching a well-known for a domain we know recently
# had a valid entry.
WELL_KNOWN_RETRY_ATTEMPTS = 3


logger = logging.getLogger(__name__)


_well_known_cache: TTLCache[bytes, Optional[bytes]] = TTLCache("well-known")
_had_valid_well_known_cache: TTLCache[bytes, bool] = TTLCache("had-valid-well-known")


@attr.s(slots=True, frozen=True, auto_attribs=True)
class WellKnownLookupResult:
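    # None if the server has no (usable) .well-known delegation or the lookup
    # failed; otherwise the delegated server name, as bytes.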
    delegated_server: Optional[bytes]


class WellKnownResolver:
    """Handles well-known lookups for matrix servers."""

    def __init__(
        self,
        reactor: IReactorTime,
        agent: IAgent,
        user_agent: bytes,
        well_known_cache: Optional[TTLCache[bytes, Optional[bytes]]] = None,
        had_well_known_cache: Optional[TTLCache[bytes, bool]] = None,
    ):
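        """
        Args:
            reactor: twisted reactor, used as a time source and for sleeping
                between retries
            agent: agent used for the outbound .well-known requests (it is
                wrapped in a RedirectAgent so that redirects are followed)
            user_agent: value to send as the User-Agent header
            well_known_cache: cache of lookup results; defaults to a shared
                module-level cache
            had_well_known_cache: cache recording which servers recently had a
                valid .well-known; defaults to a shared module-level cache
        """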
        self._reactor = reactor
        self._clock = Clock(reactor)

        if well_known_cache is None:
            well_known_cache = _well_known_cache

        if had_well_known_cache is None:
            had_well_known_cache = _had_valid_well_known_cache

        self._well_known_cache = well_known_cache
        self._had_valid_well_known_cache = had_well_known_cache
        self._well_known_agent = RedirectAgent(agent)
        self.user_agent = user_agent

    async def get_well_known(self, server_name: bytes) -> WellKnownLookupResult:
        """Attempt to fetch and parse a .well-known file for the given server

        Args:
            server_name: name of the server, from the requested url

        Returns:
            The result of the lookup
        """
        try:
            prev_result, expiry, ttl = self._well_known_cache.get_with_expiry(
                server_name
            )

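            # Serve straight from the cache while we are outside the final
            # WELL_KNOWN_GRACE_PERIOD_FACTOR fraction of the TTL; inside that
            # grace window we fall through and attempt an early refetch (a
            # temporary failure can still fall back to prev_result below).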
            now = self._clock.time()
            if now < expiry - WELL_KNOWN_GRACE_PERIOD_FACTOR * ttl:
                return WellKnownLookupResult(delegated_server=prev_result)
        except KeyError:
            prev_result = None

        # TODO: should we linearise so that we don't end up doing two .well-known
        # requests for the same server in parallel?
        try:
            with Measure(self._clock, "get_well_known"):
                result: Optional[bytes]
                cache_period: float

                result, cache_period = await self._fetch_well_known(server_name)

        except _FetchWellKnownFailure as e:
            if prev_result and e.temporary:
                # This is a temporary failure and we have a still-valid cached
                # result, so let's return that. Hopefully the next time we ask
                # the remote will be back up again.
                return WellKnownLookupResult(delegated_server=prev_result)

            result = None

            if self._had_valid_well_known_cache.get(server_name, False):
                # We have recently seen a valid well-known record for this
                # server, so we cache the lack of well-known for a shorter time.
                cache_period = WELL_KNOWN_DOWN_CACHE_PERIOD
            else:
                cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD

            # add some randomness to the TTL to avoid a stampeding herd
            cache_period *= random.uniform(
                1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
                1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
            )

        if cache_period > 0:
            self._well_known_cache.set(server_name, result, cache_period)

        return WellKnownLookupResult(delegated_server=result)

    async def _fetch_well_known(self, server_name: bytes) -> Tuple[bytes, float]:
        """Actually fetch and parse a .well-known file, without checking the cache.

        Args:
            server_name: name of the server, from the requested url

        Raises:
            _FetchWellKnownFailure if we fail to look up a result

        Returns:
            The lookup result and cache period.
        """

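        # The document we fetch should look like this sketch (the value is
        # illustrative only):
        #
        #     {"m.server": "delegated.example.com:8448"}
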
        had_valid_well_known = self._had_valid_well_known_cache.get(server_name, False)

        # We do this in two steps to differentiate between possibly transient
        # errors (e.g. can't connect to host, 503 response) and more permanent
        # errors (such as getting a 404 response).
        response, body = await self._make_well_known_request(
            server_name, retry=had_valid_well_known
        )

        try:
            if response.code != 200:
                raise Exception("Non-200 response %s" % (response.code,))

            parsed_body = json_decoder.decode(body.decode("utf-8"))
            logger.info("Response from .well-known: %s", parsed_body)

            result = parsed_body["m.server"].encode("ascii")
        except defer.CancelledError:
            # Bail if we've been cancelled
            raise
        except Exception as e:
            logger.info("Error parsing well-known for %s: %s", server_name, e)
            raise _FetchWellKnownFailure(temporary=False)

        cache_period = _cache_period_from_headers(
            response.headers, time_now=self._reactor.seconds
        )
        if cache_period is None:
            cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
            # add some randomness to the TTL to avoid a stampeding herd every 24 hours
            # after startup
            cache_period *= random.uniform(
                1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
                1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
            )
        else:
            cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
            cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD)

        # We got a success, so mark it as such in the cache
        self._had_valid_well_known_cache.set(
            server_name,
            bool(result),
            cache_period + WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID,
        )

        return result, cache_period

    async def _make_well_known_request(
        self, server_name: bytes, retry: bool
    ) -> Tuple[IResponse, bytes]:
        """Make the well-known request.

        This will retry the request if `retry` is set and it fails (either
        because we are unable to connect, or because we receive a 5xx
        response).

        Args:
            server_name: name of the server, from the requested url
            retry: Whether to retry the request if it fails.

        Raises:
            _FetchWellKnownFailure if we fail to look up a result

        Returns:
            The response object and body. The response may be a non-200 response.
        """
        uri = b"https://%s/.well-known/matrix/server" % (server_name,)
        uri_str = uri.decode("ascii")

        headers = {
            b"User-Agent": [self.user_agent],
        }

        i = 0
        while True:
            i += 1

            logger.info("Fetching %s", uri_str)
            try:
                response = await make_deferred_yieldable(
                    self._well_known_agent.request(
                        b"GET", uri, headers=Headers(headers)
                    )
                )
                body_stream = BytesIO()
                await make_deferred_yieldable(
                    read_body_with_max_size(response, body_stream, WELL_KNOWN_MAX_SIZE)
                )
                body = body_stream.getvalue()

                if 500 <= response.code < 600:
                    raise Exception("5xx response %s" % (response.code,))

                return response, body
            except defer.CancelledError:
                # Bail if we've been cancelled
                raise
            except BodyExceededMaxSize:
                # If the well-known file was too large, do not keep attempting
                # to download it, but consider it a temporary error.
                logger.warning(
                    "Requested .well-known file for %s is too large (> %r bytes)",
                    server_name.decode("ascii"),
                    WELL_KNOWN_MAX_SIZE,
                )
                raise _FetchWellKnownFailure(temporary=True)
            except Exception as e:
                if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS:
                    logger.info("Error fetching %s: %s", uri_str, e)
                    raise _FetchWellKnownFailure(temporary=True)

                logger.info("Error fetching %s: %s. Retrying", uri_str, e)

                # Sleep briefly in the hopes that they come back up
                await self._clock.sleep(0.5)
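
# A minimal usage sketch (hypothetical wiring, not part of this module): the
# resolver just needs a reactor, an IAgent and a User-Agent value.
#
#     from twisted.internet import reactor
#     from twisted.web.client import Agent
#
#     resolver = WellKnownResolver(reactor, Agent(reactor), b"my-server/1.0")
#     d = defer.ensureDeferred(resolver.get_well_known(b"example.com"))
#     d.addCallback(lambda result: print(result.delegated_server))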


def _cache_period_from_headers(
    headers: Headers, time_now: Callable[[], float] = time.time
) -> Optional[float]:
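    """Work out a cache period from a response's caching headers.

    A minimal doctest-style sketch of the intended behaviour (header values
    are hypothetical):

        >>> _cache_period_from_headers(Headers({b"Cache-Control": [b"max-age=3600"]}))
        3600
        >>> _cache_period_from_headers(Headers({b"Cache-Control": [b"no-store"]}))
        0

    Returns None if the headers provide no usable caching information.
    """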
    cache_controls = _parse_cache_control(headers)

    if b"no-store" in cache_controls:
        return 0

    if b"max-age" in cache_controls:
        max_age = cache_controls[b"max-age"]
        if max_age:
            try:
                return int(max_age)
            except ValueError:
                pass

    expires = headers.getRawHeaders(b"expires")
    if expires is not None:
        try:
            expires_date = stringToDatetime(expires[-1])
            return expires_date - time_now()
        except ValueError:
            # RFC7234 says 'A cache recipient MUST interpret invalid date formats,
            # especially the value "0", as representing a time in the past (i.e.,
            # "already expired").
            return 0

    return None


def _parse_cache_control(headers: Headers) -> Dict[bytes, Optional[bytes]]:
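    """Parse the Cache-Control directives from a response's headers into a dict.

    A minimal doctest-style sketch (the header value is hypothetical):

        >>> _parse_cache_control(Headers({b"Cache-Control": [b"max-age=3600, no-store"]}))
        {b'max-age': b'3600', b'no-store': None}
    """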
    cache_controls = {}
    cache_control_headers = headers.getRawHeaders(b"cache-control") or []
    for hdr in cache_control_headers:
        for directive in hdr.split(b","):
            splits = [x.strip() for x in directive.split(b"=", 1)]
            k = splits[0].lower()
            v = splits[1] if len(splits) > 1 else None
            cache_controls[k] = v
    return cache_controls


@attr.s(slots=True)
class _FetchWellKnownFailure(Exception):
    # True if the failure may be temporary: we did not get a definitive
    # (non-5xx) HTTP response, e.g. because of a connection error, a 5xx
    # response, or an over-large response body.
    temporary: bool = attr.ib()