# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import logging
import random
import re
import time

from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.error import ConnectError
from twisted.names import client, dns
from twisted.names.error import DNSNameError, DomainError

logger = logging.getLogger(__name__)


SERVER_CACHE = {}


# our record of an individual server which can be tried to reach a destination.
#
# "host" is the hostname acquired from the SRV record. Except when there's
# no SRV record, in which case it is the original hostname.
_Server = collections.namedtuple(
    "_Server", "priority weight host port expires"
)
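
# SERVER_CACHE maps a service name (e.g. "_matrix._tcp.example.org", for a
# hypothetical example.org) to the list of _Server entries most recently
# returned by resolve_service() below.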


def parse_server_name(server_name):
    """Split a server name into host/port parts.

    Args:
        server_name (str): server name to parse

    Returns:
        Tuple[str, int|None]: host/port parts.

    Raises:
        ValueError if the server name could not be parsed.
    """
    try:
        if server_name[-1] == ']':
            # ipv6 literal, hopefully
            return server_name, None

        domain_port = server_name.rsplit(":", 1)
        domain = domain_port[0]
        port = int(domain_port[1]) if domain_port[1:] else None
        return domain, port
    except Exception:
        raise ValueError("Invalid server name '%s'" % server_name)
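
# A quick sketch of the expected splits (example inputs only):
#
#   parse_server_name("matrix.org")          -> ("matrix.org", None)
#   parse_server_name("matrix.org:8448")     -> ("matrix.org", 8448)
#   parse_server_name("[2001:db8::1]")       -> ("[2001:db8::1]", None)
#   parse_server_name("[2001:db8::1]:8448")  -> ("[2001:db8::1]", 8448)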


VALID_HOST_REGEX = re.compile(
    "\\A[0-9a-zA-Z.-]+\\Z",
)


def parse_and_validate_server_name(server_name):
    """Split a server name into host/port parts and do some basic validation.

    Args:
        server_name (str): server name to parse

    Returns:
        Tuple[str, int|None]: host/port parts.

    Raises:
        ValueError if the server name could not be parsed.
    """
    host, port = parse_server_name(server_name)

    # these tests don't need to be bulletproof as we'll find out soon enough
    # if somebody is giving us invalid data. What we *do* need is to be sure
    # that nobody is sneaking IP literals in that look like hostnames, etc.

    # look for ipv6 literals
    if host[0] == '[':
        if host[-1] != ']':
            raise ValueError("Mismatched [...] in server name '%s'" % (
                server_name,
            ))
        return host, port

    # otherwise it should only be alphanumerics, dots and dashes.
    if not VALID_HOST_REGEX.match(host):
        raise ValueError("Server name '%s' contains invalid characters" % (
            server_name,
        ))

    return host, port
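
# Likewise, a sketch of the validation on example inputs:
#
#   parse_and_validate_server_name("example.com:8448")    -> ("example.com", 8448)
#   parse_and_validate_server_name("[2001:db8::1]:8448")  -> ("[2001:db8::1]", 8448)
#   parse_and_validate_server_name("under_score.example") -> raises ValueError
#   parse_and_validate_server_name("[2001:db8::1")        -> raises ValueError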


def matrix_federation_endpoint(reactor, destination, tls_client_options_factory=None,
                               timeout=None):
    """Construct an endpoint for the given matrix destination.

    Args:
        reactor: Twisted reactor.
        destination (unicode): The name of the server to connect to.
        tls_client_options_factory
            (synapse.crypto.context_factory.ClientTLSOptionsFactory):
            Factory which generates TLS options for client connections.
        timeout (int): connection timeout in seconds
    """
    domain, port = parse_server_name(destination)

    endpoint_kw_args = {}

    if timeout is not None:
        endpoint_kw_args.update(timeout=timeout)

    if tls_client_options_factory is None:
        transport_endpoint = HostnameEndpoint
        default_port = 8008
    else:
        # the SNI string should be the same as the Host header, minus the port.
        # as per https://github.com/matrix-org/synapse/issues/2525#issuecomment-336896777,
        # the Host header and SNI should therefore be the server_name of the remote
        # server.
        tls_options = tls_client_options_factory.get_options(domain)

        def transport_endpoint(reactor, host, port, timeout):
            return wrapClientTLS(
                tls_options,
                HostnameEndpoint(reactor, host, port, timeout=timeout),
            )
        default_port = 8448

    if port is None:
        return _WrappingEndpointFac(SRVClientEndpoint(
            reactor, "matrix", domain, protocol="tcp",
            default_port=default_port, endpoint=transport_endpoint,
            endpoint_kw_args=endpoint_kw_args
        ), reactor)
    else:
        return _WrappingEndpointFac(transport_endpoint(
            reactor, domain, port, **endpoint_kw_args
        ), reactor)
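
# Illustrative use, with hypothetical names ("example.org", tls_factory); in
# synapse this is normally driven by the federation HTTP client rather than
# called directly:
#
#   endpoint = matrix_federation_endpoint(
#       reactor, "example.org",
#       tls_client_options_factory=tls_factory, timeout=30,
#   )
#   # With no explicit port in the destination, connect() will consult the
#   # _matrix._tcp.example.org SRV record, falling back to port 8448 here
#   # (or 8008 when no TLS factory is supplied).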


class _WrappingEndpointFac(object):
    def __init__(self, endpoint_fac, reactor):
        self.endpoint_fac = endpoint_fac
        self.reactor = reactor

    @defer.inlineCallbacks
    def connect(self, protocolFactory):
        conn = yield self.endpoint_fac.connect(protocolFactory)
        conn = _WrappedConnection(conn, self.reactor)
        defer.returnValue(conn)


class _WrappedConnection(object):
    """Wraps a connection and calls abort on it if it hasn't seen any action
    for 2.5-3 minutes.
    """
    __slots__ = ["conn", "last_request"]

    def __init__(self, conn, reactor):
        object.__setattr__(self, "conn", conn)
        object.__setattr__(self, "last_request", time.time())
        self._reactor = reactor
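        # NB: because of __slots__ and the __setattr__ override below, the
        # assignment above actually stores "_reactor" on the wrapped connection
        # object, and later reads of self._reactor are served by __getattr__.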

    def __getattr__(self, name):
        return getattr(self.conn, name)

    def __setattr__(self, name, value):
        setattr(self.conn, name, value)

    def _time_things_out_maybe(self):
        # We use a slightly shorter timeout here just in case the callLater is
        # triggered early. Paranoia ftw.
        # TODO: Cancel the previous callLater rather than comparing time.time()?
        if time.time() - self.last_request >= 2.5 * 60:
            self.abort()
            # Abort the underlying TLS connection. The abort() method calls
            # loseConnection() on the TLS connection which tries to
            # shut down the connection cleanly. We call abortConnection()
            # since that will promptly close the TLS connection.
            #
            # In Twisted >18.4, the TLS connection will be None if it has closed,
            # which will make abortConnection() throw. Check that the TLS
            # connection is not None before trying to close it.
            if self.transport.getHandle() is not None:
                self.transport.abortConnection()

    def request(self, request):
        self.last_request = time.time()

        # Time this connection out if we haven't sent a request in the last
        # N minutes
        # TODO: Cancel the previous callLater?
        self._reactor.callLater(3 * 60, self._time_things_out_maybe)

        d = self.conn.request(request)

        def update_request_time(res):
            self.last_request = time.time()
            # TODO: Cancel the previous callLater?
            self._reactor.callLater(3 * 60, self._time_things_out_maybe)
            return res

        d.addCallback(update_request_time)

        return d


class SpiderEndpoint(object):
    """An endpoint which refuses to connect to blacklisted IP addresses.

    Implements twisted.internet.interfaces.IStreamClientEndpoint.
    """
    def __init__(self, reactor, host, port, blacklist, whitelist,
                 endpoint=HostnameEndpoint, endpoint_kw_args={}):
        self.reactor = reactor
        self.host = host
        self.port = port
        self.blacklist = blacklist
        self.whitelist = whitelist
        self.endpoint = endpoint
        self.endpoint_kw_args = endpoint_kw_args

    @defer.inlineCallbacks
    def connect(self, protocolFactory):
        address = yield self.reactor.resolve(self.host)

        from netaddr import IPAddress
        ip_address = IPAddress(address)

        if ip_address in self.blacklist:
            if self.whitelist is None or ip_address not in self.whitelist:
                raise ConnectError(
                    "Refusing to spider blacklisted IP address %s" % address
                )

        logger.info("Connecting to %s:%s", address, self.port)
        endpoint = self.endpoint(
            self.reactor, address, self.port, **self.endpoint_kw_args
        )
        connection = yield endpoint.connect(protocolFactory)
        defer.returnValue(connection)
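
# Illustrative construction, with hypothetical values; in synapse the blacklist
# and whitelist are normally netaddr IPSets built from the URL-preview IP range
# config options:
#
#   from netaddr import IPSet
#   spider = SpiderEndpoint(
#       reactor, "example.com", 80,
#       blacklist=IPSet(["10.0.0.0/8", "127.0.0.0/8"]),
#       whitelist=None,
#   )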


class SRVClientEndpoint(object):
    """An endpoint which looks up SRV records for a service.

    Cycles through the list of servers, with each call to connect picking
    the next server.

    Implements twisted.internet.interfaces.IStreamClientEndpoint.
    """

    def __init__(self, reactor, service, domain, protocol="tcp",
                 default_port=None, endpoint=HostnameEndpoint,
                 endpoint_kw_args={}):
        self.reactor = reactor
        self.service_name = "_%s._%s.%s" % (service, protocol, domain)

        if default_port is not None:
            self.default_server = _Server(
                host=domain,
                port=default_port,
                priority=0,
                weight=0,
                expires=0,
            )
        else:
            self.default_server = None

        self.endpoint = endpoint
        self.endpoint_kw_args = endpoint_kw_args

        self.servers = None
        self.used_servers = None

    @defer.inlineCallbacks
    def fetch_servers(self):
        self.used_servers = []
        self.servers = yield resolve_service(self.service_name)

    def pick_server(self):
        if not self.servers:
            if self.used_servers:
                self.servers = self.used_servers
                self.used_servers = []
                self.servers.sort()
            elif self.default_server:
                return self.default_server
            else:
                raise ConnectError(
                    "No server available for %s" % self.service_name
                )

        # look for all servers with the same priority
        min_priority = self.servers[0].priority
        weight_indexes = list(
            (index, server.weight + 1)
            for index, server in enumerate(self.servers)
            if server.priority == min_priority
        )

        total_weight = sum(weight for index, weight in weight_indexes)
        target_weight = random.randint(0, total_weight)
        for index, weight in weight_indexes:
            target_weight -= weight
            if target_weight <= 0:
                server = self.servers[index]
                # XXX: this looks totally dubious:
                #
                # (a) we never reuse a server until we have been through
                #     all of the servers at the same priority, so if the
                #     weights are A: 100, B:1, we always do ABABAB instead of
                #     AAAA...AAAB (approximately).
                #
                # (b) After using all the servers at the lowest priority,
                #     we move onto the next priority. We should only use the
                #     second priority if servers at the top priority are
                #     unreachable.
                #
                del self.servers[index]
                self.used_servers.append(server)
                return server
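
    # Worked example of the weighted pick above (hypothetical numbers): with
    # servers A (weight=2) and B (weight=1) at the same priority,
    # weight_indexes is [(0, 3), (1, 2)] and total_weight is 5, so
    # random.randint(0, 5) yields 0..5: targets 0-3 select A and 4-5 select B,
    # making A roughly twice as likely to be picked first.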

    @defer.inlineCallbacks
    def connect(self, protocolFactory):
        if self.servers is None:
            yield self.fetch_servers()
        server = self.pick_server()
        logger.info("Connecting to %s:%s", server.host, server.port)
        endpoint = self.endpoint(
            self.reactor, server.host, server.port, **self.endpoint_kw_args
        )
        connection = yield endpoint.connect(protocolFactory)
        defer.returnValue(connection)


@defer.inlineCallbacks
def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=time):
    cache_entry = cache.get(service_name, None)
    if cache_entry:
        if all(s.expires > int(clock.time()) for s in cache_entry):
            servers = list(cache_entry)
            defer.returnValue(servers)

    servers = []

    try:
        try:
            answers, _, _ = yield dns_client.lookupService(service_name)
        except DNSNameError:
            defer.returnValue([])

        if (len(answers) == 1
                and answers[0].type == dns.SRV
                and answers[0].payload
                and answers[0].payload.target == dns.Name(b'.')):
            raise ConnectError("Service %s unavailable" % service_name)

        for answer in answers:
            if answer.type != dns.SRV or not answer.payload:
                continue

            payload = answer.payload

            servers.append(_Server(
                host=str(payload.target),
                port=int(payload.port),
                priority=int(payload.priority),
                weight=int(payload.weight),
                expires=int(clock.time()) + answer.ttl,
            ))

        servers.sort()
        cache[service_name] = list(servers)
    except DomainError as e:
        # We failed to resolve the name (other than a DNSNameError, which is
        # handled above). Try something in the cache, else re-raise.
        cache_entry = cache.get(service_name, None)
        if cache_entry:
            logger.warning(
                "Failed to resolve %r, falling back to cache. %r",
                service_name, e
            )
            servers = list(cache_entry)
        else:
            raise e

    defer.returnValue(servers)
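
# For illustration: a hypothetical SRV answer such as
#
#   _matrix._tcp.example.org. 300 IN SRV 10 5 8448 synapse.example.org.
#
# would be returned (and cached under "_matrix._tcp.example.org") as
#
#   _Server(priority=10, weight=5, host="synapse.example.org", port=8448,
#           expires=int(clock.time()) + 300)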