#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#

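# Tests for MatrixFederationHttpClient (synapse.http.matrixfederationclient).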
import io
from typing import Any, Dict, Generator
from unittest.mock import ANY, Mock, create_autospec

from netaddr import IPSet
from parameterized import parameterized

from twisted.internet import defer
from twisted.internet.defer import Deferred, TimeoutError
from twisted.internet.error import ConnectingCancelledError, DNSLookupError
from twisted.test.proto_helpers import MemoryReactor, StringTransport
from twisted.web.client import Agent, ResponseNeverReceived
from twisted.web.http import HTTPChannel
from twisted.web.http_headers import Headers

from synapse.api.errors import HttpResponseException, RequestSendFailed
from synapse.api.ratelimiting import Ratelimiter
from synapse.config._base import ConfigError
from synapse.config.ratelimiting import RatelimitSettings
from synapse.http.matrixfederationclient import (
    ByteParser,
    MatrixFederationHttpClient,
    MatrixFederationRequest,
)
from synapse.logging.context import (
    SENTINEL_CONTEXT,
    LoggingContext,
    LoggingContextOrSentinel,
    current_context,
)
from synapse.server import HomeServer
from synapse.util import Clock

from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.server import FakeTransport
from tests.test_utils import FakeResponse
from tests.unittest import HomeserverTestCase, override_config

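# Helper used throughout these tests: assert that the calling code is currently
# running in the expected logging context (or the sentinel context).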
def check_logcontext(context: LoggingContextOrSentinel) -> None:
    current = current_context()
    if current is not context:
        raise AssertionError("Expected logcontext %s but was %s" % (context, current))


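# These tests drive MatrixFederationHttpClient against a MemoryReactor: DNS
# lookups are faked (e.g. "testserv" resolves to 1.2.3.4) and responses are
# injected by writing raw HTTP bytes to the protocol under test.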
class FederationClientTests(HomeserverTestCase):
    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
        hs = self.setup_test_homeserver(reactor=reactor, clock=clock)
        return hs

    def prepare(
        self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
    ) -> None:
        self.cl = MatrixFederationHttpClient(self.hs, None)
        self.reactor.lookups["testserv"] = "1.2.3.4"

    def test_client_get(self) -> None:
        """
        happy-path test of a GET request
        """

        @defer.inlineCallbacks
        def do_request() -> Generator["Deferred[Any]", object, object]:
            with LoggingContext("one") as context:
                fetch_d = defer.ensureDeferred(
                    self.cl.get_json("testserv:8008", "foo/bar")
                )

                # Nothing happened yet
                self.assertNoResult(fetch_d)

                # should have reset logcontext to the sentinel
                check_logcontext(SENTINEL_CONTEXT)

                try:
                    fetch_res = yield fetch_d
                    return fetch_res
                finally:
                    check_logcontext(context)

        test_d = do_request()

        self.pump()

        # Nothing happened yet
        self.assertNoResult(test_d)

        # Make sure treq is trying to connect
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        (host, port, factory, _timeout, _bindAddress) = clients[0]
        self.assertEqual(host, "1.2.3.4")
        self.assertEqual(port, 8008)

        # complete the connection and wire it up to a fake transport
        protocol = factory.buildProtocol(None)
        transport = StringTransport()
        protocol.makeConnection(transport)

        # that should have made it send the request to the transport
        self.assertRegex(transport.value(), b"^GET /foo/bar")
        self.assertRegex(transport.value(), b"Host: testserv:8008")

        # Deferred is still without a result
        self.assertNoResult(test_d)

        # Send it the HTTP response
        res_json = b'{ "a": 1 }'
        protocol.dataReceived(
            b"HTTP/1.1 200 OK\r\n"
            b"Server: Fake\r\n"
            b"Content-Type: application/json\r\n"
            b"Content-Length: %i\r\n"
            b"\r\n"
            b"%s" % (len(res_json), res_json)
        )

        self.pump()

        res = self.successResultOf(test_d)

        # check the response is as expected
        self.assertEqual(res, {"a": 1})

    def test_dns_error(self) -> None:
        """
        If the DNS lookup returns an error, it will bubble up.
        """
        d = defer.ensureDeferred(
            self.cl.get_json("testserv2:8008", "foo/bar", timeout=10000)
        )
        self.pump()

        f = self.failureResultOf(d)
        self.assertIsInstance(f.value, RequestSendFailed)
        self.assertIsInstance(f.value.inner_exception, DNSLookupError)

    def test_client_connection_refused(self) -> None:
        d = defer.ensureDeferred(
            self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
        )

        self.pump()

        # Nothing happened yet
        self.assertNoResult(d)

        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        (host, port, factory, _timeout, _bindAddress) = clients[0]
        self.assertEqual(host, "1.2.3.4")
        self.assertEqual(port, 8008)
        e = Exception("go away")
        factory.clientConnectionFailed(None, e)
        self.pump(0.5)

        f = self.failureResultOf(d)

        self.assertIsInstance(f.value, RequestSendFailed)
        self.assertIs(f.value.inner_exception, e)

    def test_client_never_connect(self) -> None:
        """
        If the HTTP request is not connected and is timed out, it'll give a
        ConnectingCancelledError or TimeoutError.
        """
        d = defer.ensureDeferred(
            self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
        )

        self.pump()

        # Nothing happened yet
        self.assertNoResult(d)

        # Make sure treq is trying to connect
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        self.assertEqual(clients[0][0], "1.2.3.4")
        self.assertEqual(clients[0][1], 8008)

        # Deferred is still without a result
        self.assertNoResult(d)

        # Push by enough to time it out
        self.reactor.advance(10.5)
        f = self.failureResultOf(d)

        self.assertIsInstance(f.value, RequestSendFailed)
        self.assertIsInstance(
            f.value.inner_exception, (ConnectingCancelledError, TimeoutError)
        )

    def test_client_connect_no_response(self) -> None:
        """
        If the HTTP request is connected, but gets no response before being
        timed out, it'll give a ResponseNeverReceived.
        """
        d = defer.ensureDeferred(
            self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
        )

        self.pump()

        # Nothing happened yet
        self.assertNoResult(d)

        # Make sure treq is trying to connect
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        self.assertEqual(clients[0][0], "1.2.3.4")
        self.assertEqual(clients[0][1], 8008)

        conn = Mock()
        client = clients[0][2].buildProtocol(None)
        client.makeConnection(conn)

        # Deferred is still without a result
        self.assertNoResult(d)

        # Push by enough to time it out
        self.reactor.advance(10.5)
        f = self.failureResultOf(d)

        self.assertIsInstance(f.value, RequestSendFailed)
        self.assertIsInstance(f.value.inner_exception, ResponseNeverReceived)

    def test_client_ip_range_blocklist(self) -> None:
        """Ensure that Synapse does not try to connect to blocked IPs"""

        # Set up the ip_range blocklist
        self.hs.config.server.federation_ip_range_blocklist = IPSet(
            ["127.0.0.0/8", "fe80::/64"]
        )
        self.reactor.lookups["internal"] = "127.0.0.1"
        self.reactor.lookups["internalv6"] = "fe80:0:0:0:0:8a2e:370:7337"
        self.reactor.lookups["fine"] = "10.20.30.40"
        cl = MatrixFederationHttpClient(self.hs, None)

        # Try making a GET request to a blocked IPv4 address
        # ------------------------------------------------------
        # Make the request
        d = defer.ensureDeferred(cl.get_json("internal:8008", "foo/bar", timeout=10000))

        # Nothing happened yet
        self.assertNoResult(d)

        self.pump(1)

        # Check that it was unable to resolve the address
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 0)

        f = self.failureResultOf(d)
        self.assertIsInstance(f.value, RequestSendFailed)
        self.assertIsInstance(f.value.inner_exception, DNSLookupError)

        # Try making a POST request to a blocked IPv6 address
        # -------------------------------------------------------
        # Make the request
        d = defer.ensureDeferred(
            cl.post_json("internalv6:8008", "foo/bar", timeout=10000)
        )

        # Nothing has happened yet
        self.assertNoResult(d)

        # Move the reactor forwards
        self.pump(1)

        # Check that it was unable to resolve the address
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 0)

        # Check that it was due to a blocked DNS lookup
        f = self.failureResultOf(d, RequestSendFailed)
        self.assertIsInstance(f.value.inner_exception, DNSLookupError)

        # Try making a POST request to an allowed IPv4 address
        # ----------------------------------------------------------
        # Make the request
        d = defer.ensureDeferred(cl.post_json("fine:8008", "foo/bar", timeout=10000))

        # Nothing has happened yet
        self.assertNoResult(d)

        # Move the reactor forwards
        self.pump(1)

        # Check that it was able to resolve the address
        clients = self.reactor.tcpClients
        self.assertNotEqual(len(clients), 0)

        # Connection will still fail as this IP address does not resolve to anything
        f = self.failureResultOf(d, RequestSendFailed)
        self.assertIsInstance(f.value.inner_exception, ConnectingCancelledError)

    def test_client_gets_headers(self) -> None:
        """
        Once the client gets the headers, _request returns successfully.
        """
        request = MatrixFederationRequest(
            method="GET", destination="testserv:8008", path="foo/bar"
        )
        d = defer.ensureDeferred(self.cl._send_request(request, timeout=10000))

        self.pump()

        conn = Mock()
        clients = self.reactor.tcpClients
        client = clients[0][2].buildProtocol(None)
        client.makeConnection(conn)

        # Deferred does not have a result
        self.assertNoResult(d)

        # Send it the HTTP response
        client.dataReceived(b"HTTP/1.1 200 OK\r\nServer: Fake\r\n\r\n")

        # We should get a successful response
        r = self.successResultOf(d)
        self.assertEqual(r.code, 200)

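    # The next test exercises `federation_get_file` for authenticated media: the
    # first response is a multipart/mixed body whose second part carries a
    # `Location` header, and the client is expected to follow that redirect.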
    def test_authed_media_redirect_response(self) -> None:
        """
        Validate that, when following a `Location` redirect, the
        maximum size is _not_ set to the initial response `Content-Length` and
        the media file can be downloaded.
        """
        limiter = Ratelimiter(
            store=self.hs.get_datastores().main,
            clock=self.clock,
            cfg=RatelimitSettings(key="", per_second=0.17, burst_count=1048576),
        )

        output_stream = io.BytesIO()

        d = defer.ensureDeferred(
            self.cl.federation_get_file(
                "testserv:8008", "path", output_stream, limiter, "127.0.0.1", 10000
            )
        )

        self.pump()

        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        (host, port, factory, _timeout, _bindAddress) = clients[0]
        self.assertEqual(host, "1.2.3.4")
        self.assertEqual(port, 8008)

        # complete the connection and wire it up to a fake transport
        protocol = factory.buildProtocol(None)
        transport = StringTransport()
        protocol.makeConnection(transport)

        # Deferred does not have a result
        self.assertNoResult(d)

        redirect_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nLocation: http://testserv:8008/ab/c1/2345.txt\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n"
        protocol.dataReceived(
            b"HTTP/1.1 200 OK\r\n"
            b"Server: Fake\r\n"
            b"Content-Length: %i\r\n"
            b"Content-Type: multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a\r\n\r\n"
            % (len(redirect_data))
        )
        protocol.dataReceived(redirect_data)

        # Still no result, not followed the redirect yet
        self.assertNoResult(d)

        # Now send the response returned by the server at `Location`
        clients = self.reactor.tcpClients
        (host, port, factory, _timeout, _bindAddress) = clients[1]
        self.assertEqual(host, "1.2.3.4")
        self.assertEqual(port, 8008)
        protocol = factory.buildProtocol(None)
        transport = StringTransport()
        protocol.makeConnection(transport)

        # make sure the length is longer than the initial response
        data = b"Hello world!" * 30
        protocol.dataReceived(
            b"HTTP/1.1 200 OK\r\n"
            b"Server: Fake\r\n"
            b"Content-Length: %i\r\n"
            b"Content-Type: text/plain\r\n"
            b"\r\n"
            b"%s\r\n"
            b"\r\n" % (len(data), data)
        )

        # We should get a successful response
        length, _, _ = self.successResultOf(d)
        self.assertEqual(length, len(data))
        self.assertEqual(output_stream.getvalue(), data)

    @parameterized.expand(["get_json", "post_json", "delete_json", "put_json"])
    def test_timeout_reading_body(self, method_name: str) -> None:
        """
        If the HTTP request is connected, but gets no response before being
        timed out, it'll give a RequestSendFailed with can_retry.
        """
        method = getattr(self.cl, method_name)
        d = defer.ensureDeferred(method("testserv:8008", "foo/bar", timeout=10000))

        self.pump()

        conn = Mock()
        clients = self.reactor.tcpClients
        client = clients[0][2].buildProtocol(None)
        client.makeConnection(conn)

        # Deferred does not have a result
        self.assertNoResult(d)

        # Send it the HTTP response
        client.dataReceived(
            b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n"
            b"Server: Fake\r\n\r\n"
        )

        # Push by enough to time it out
        self.reactor.advance(10.5)
        f = self.failureResultOf(d)

        self.assertIsInstance(f.value, RequestSendFailed)
        self.assertTrue(f.value.can_retry)
        self.assertIsInstance(f.value.inner_exception, defer.TimeoutError)

    def test_client_requires_trailing_slashes(self) -> None:
        """
        If a connection is made to a remote server, but the remote server rejects
        the request because it requires a trailing slash, we need to retry the
        request with a trailing slash. Workaround for Synapse <= v0.99.3, explained
        in https://github.com/matrix-org/synapse/issues/3622.
        """
        d = defer.ensureDeferred(
            self.cl.get_json("testserv:8008", "foo/bar", try_trailing_slash_on_400=True)
        )

        # Send the request
        self.pump()

        # there should have been a call to connectTCP
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        (_host, _port, factory, _timeout, _bindAddress) = clients[0]

        # complete the connection and wire it up to a fake transport
        client = factory.buildProtocol(None)
        conn = StringTransport()
        client.makeConnection(conn)

        # that should have made it send the request to the connection
        self.assertRegex(conn.value(), b"^GET /foo/bar")

        # Clear the original request data before sending a response
        conn.clear()

        # Send the HTTP response
        client.dataReceived(
            b"HTTP/1.1 400 Bad Request\r\n"
            b"Content-Type: application/json\r\n"
            b"Content-Length: 59\r\n"
            b"\r\n"
            b'{"errcode":"M_UNRECOGNIZED","error":"Unrecognized request"}'
        )

        # We should get another request with a trailing slash
        self.assertRegex(conn.value(), b"^GET /foo/bar/")

        # Send a happy response this time
        client.dataReceived(
            b"HTTP/1.1 200 OK\r\n"
            b"Content-Type: application/json\r\n"
            b"Content-Length: 2\r\n"
            b"\r\n"
            b"{}"
        )

        # We should get a successful response
        r = self.successResultOf(d)
        self.assertEqual(r, {})

    def test_client_does_not_retry_on_400_plus(self) -> None:
        """
        Another test for trailing slashes, but this time checking that we don't
        retry with a trailing slash on a non-400/M_UNRECOGNIZED response.

        See test_client_requires_trailing_slashes() for context.
        """
        d = defer.ensureDeferred(
            self.cl.get_json("testserv:8008", "foo/bar", try_trailing_slash_on_400=True)
        )

        # Send the request
        self.pump()

        # there should have been a call to connectTCP
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        (_host, _port, factory, _timeout, _bindAddress) = clients[0]

        # complete the connection and wire it up to a fake transport
        client = factory.buildProtocol(None)
        conn = StringTransport()
        client.makeConnection(conn)

        # that should have made it send the request to the connection
        self.assertRegex(conn.value(), b"^GET /foo/bar")

        # Clear the original request data before sending a response
        conn.clear()

        # Send the HTTP response
        client.dataReceived(
            b"HTTP/1.1 404 Not Found\r\n"
            b"Content-Type: application/json\r\n"
            b"Content-Length: 2\r\n"
            b"\r\n"
            b"{}"
        )

        # We should not get another request
        self.assertEqual(conn.value(), b"")

        # We should get a 404 failure response
        self.failureResultOf(d)

    def test_client_sends_body(self) -> None:
        defer.ensureDeferred(
            self.cl.post_json(
                "testserv:8008", "foo/bar", timeout=10000, data={"a": "b"}
            )
        )

        self.pump()

        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        client = clients[0][2].buildProtocol(None)
        server = HTTPChannel()

        client.makeConnection(FakeTransport(server, self.reactor))
        server.makeConnection(FakeTransport(client, self.reactor))

        self.pump(0.1)

        self.assertEqual(len(server.requests), 1)
        request = server.requests[0]
        content = request.content.read()
        self.assertEqual(content, b'{"a":"b"}')

    def test_closes_connection(self) -> None:
        """Check that the client closes unused HTTP connections"""
        d = defer.ensureDeferred(self.cl.get_json("testserv:8008", "foo/bar"))

        self.pump()

        # there should have been a call to connectTCP
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        (_host, _port, factory, _timeout, _bindAddress) = clients[0]

        # complete the connection and wire it up to a fake transport
        client = factory.buildProtocol(None)
        conn = StringTransport()
        client.makeConnection(conn)

        # that should have made it send the request to the connection
        self.assertRegex(conn.value(), b"^GET /foo/bar")

        # Send the HTTP response
        client.dataReceived(
            b"HTTP/1.1 200 OK\r\n"
            b"Content-Type: application/json\r\n"
            b"Content-Length: 2\r\n"
            b"\r\n"
            b"{}"
        )

        # We should get a successful response
        r = self.successResultOf(d)
        self.assertEqual(r, {})

        self.assertFalse(conn.disconnecting)

        # wait for a while
        self.reactor.advance(120)

        self.assertTrue(conn.disconnecting)

    @parameterized.expand([(b"",), (b"foo",), (b'{"a": Infinity}',)])
    def test_json_error(self, return_value: bytes) -> None:
        """
        Test what happens if invalid JSON is returned from the remote endpoint.
        """

        test_d = defer.ensureDeferred(self.cl.get_json("testserv:8008", "foo/bar"))

        self.pump()

        # Nothing happened yet
        self.assertNoResult(test_d)

        # Make sure treq is trying to connect
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        (host, port, factory, _timeout, _bindAddress) = clients[0]
        self.assertEqual(host, "1.2.3.4")
        self.assertEqual(port, 8008)

        # complete the connection and wire it up to a fake transport
        protocol = factory.buildProtocol(None)
        transport = StringTransport()
        protocol.makeConnection(transport)

        # that should have made it send the request to the transport
        self.assertRegex(transport.value(), b"^GET /foo/bar")
        self.assertRegex(transport.value(), b"Host: testserv:8008")

        # Deferred is still without a result
        self.assertNoResult(test_d)

        # Send it the HTTP response
        protocol.dataReceived(
            b"HTTP/1.1 200 OK\r\n"
            b"Server: Fake\r\n"
            b"Content-Type: application/json\r\n"
            b"Content-Length: %i\r\n"
            b"\r\n"
            b"%s" % (len(return_value), return_value)
        )

        self.pump()

        f = self.failureResultOf(test_d)
        self.assertIsInstance(f.value, RequestSendFailed)

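    # The next test sends a response with no Content-Length and keeps streaming
    # data, so it exercises the ByteParser.MAX_RESPONSE_SIZE cap on response size.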
    def test_too_big(self) -> None:
        """
        Test what happens if a huge response is returned from the remote endpoint.
        """

        test_d = defer.ensureDeferred(self.cl.get_json("testserv:8008", "foo/bar"))

        self.pump()

        # Nothing happened yet
        self.assertNoResult(test_d)

        # Make sure treq is trying to connect
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        (host, port, factory, _timeout, _bindAddress) = clients[0]
        self.assertEqual(host, "1.2.3.4")
        self.assertEqual(port, 8008)

        # complete the connection and wire it up to a fake transport
        protocol = factory.buildProtocol(None)
        transport = StringTransport()
        protocol.makeConnection(transport)

        # that should have made it send the request to the transport
        self.assertRegex(transport.value(), b"^GET /foo/bar")
        self.assertRegex(transport.value(), b"Host: testserv:8008")

        # Deferred is still without a result
        self.assertNoResult(test_d)

        # Send it a huge HTTP response
        protocol.dataReceived(
            b"HTTP/1.1 200 OK\r\n"
            b"Server: Fake\r\n"
            b"Content-Type: application/json\r\n"
            b"\r\n"
        )

        self.pump()

        # should still be waiting
        self.assertNoResult(test_d)

        sent = 0
        chunk_size = 1024 * 512
        while not test_d.called:
            protocol.dataReceived(b"a" * chunk_size)
            sent += chunk_size
            self.assertLessEqual(sent, ByteParser.MAX_RESPONSE_SIZE)

        self.assertEqual(sent, ByteParser.MAX_RESPONSE_SIZE)

        f = self.failureResultOf(test_d)
        self.assertIsInstance(f.value, RequestSendFailed)

        self.assertTrue(transport.disconnecting)

    def test_build_auth_headers_rejects_falsey_destinations(self) -> None:
        with self.assertRaises(ValueError):
            self.cl.build_auth_headers(None, b"GET", b"https://example.com")
        with self.assertRaises(ValueError):
            self.cl.build_auth_headers(b"", b"GET", b"https://example.com")
        with self.assertRaises(ValueError):
            self.cl.build_auth_headers(
                None, b"GET", b"https://example.com", destination_is=b""
            )
        with self.assertRaises(ValueError):
            self.cl.build_auth_headers(
                b"", b"GET", b"https://example.com", destination_is=b""
            )

    @override_config(
        {
            "federation": {
                "client_timeout": "180s",
                "max_long_retry_delay": "100s",
                "max_short_retry_delay": "7s",
                "max_long_retries": 20,
                "max_short_retries": 5,
            }
        }
    )
    def test_configurable_retry_and_delay_values(self) -> None:
        self.assertEqual(self.cl.default_timeout_seconds, 180)
        self.assertEqual(self.cl.max_long_retry_delay_seconds, 100)
        self.assertEqual(self.cl.max_short_retry_delay_seconds, 7)
        self.assertEqual(self.cl.max_long_retries, 20)
        self.assertEqual(self.cl.max_short_retries, 5)


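# Multi-worker tests: with `outbound_federation_restricted_to` set, outbound
# federation traffic from the main process must be proxied via the
# `federation_sender` worker, which these tests back with a mocked Agent.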
class FederationClientProxyTests(BaseMultiWorkerStreamTestCase):
    def default_config(self) -> Dict[str, Any]:
        conf = super().default_config()
        conf["instance_map"] = {
            "main": {"host": "testserv", "port": 8765},
            "federation_sender": {"host": "testserv", "port": 1001},
        }
        return conf

    @override_config(
        {
            "outbound_federation_restricted_to": ["federation_sender"],
            "worker_replication_secret": "secret",
        }
    )
    def test_proxy_requests_through_federation_sender_worker(self) -> None:
        """
        Test that all outbound federation requests go through the `federation_sender`
        worker
        """
        # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
        # so we can act like some remote server responding to requests
        mock_client_on_federation_sender = Mock()
        mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
        mock_client_on_federation_sender.agent = mock_agent_on_federation_sender

        # Create the `federation_sender` worker
        self.make_worker_hs(
            "synapse.app.generic_worker",
            {"worker_name": "federation_sender"},
            federation_http_client=mock_client_on_federation_sender,
        )

        # Fake `remoteserv:8008` responding to requests
        mock_agent_on_federation_sender.request.side_effect = (
            lambda *args, **kwargs: defer.succeed(
                FakeResponse.json(
                    payload={
                        "foo": "bar",
                    }
                )
            )
        )

        # This federation request from the main process should be proxied through the
        # `federation_sender` worker off to the remote server
        test_request_from_main_process_d = defer.ensureDeferred(
            self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar")
        )

        # Pump the reactor so our deferred goes through the motions
        self.pump()

        # Make sure that the request was proxied through the `federation_sender` worker
        mock_agent_on_federation_sender.request.assert_called_once_with(
            b"GET",
            b"matrix-federation://remoteserv:8008/foo/bar",
            headers=ANY,
            bodyProducer=ANY,
        )

        # Make sure the response is as expected back on the main worker
        res = self.successResultOf(test_request_from_main_process_d)
        self.assertEqual(res, {"foo": "bar"})

    @override_config(
        {
            "outbound_federation_restricted_to": ["federation_sender"],
            "worker_replication_secret": "secret",
        }
    )
    def test_proxy_request_with_network_error_through_federation_sender_worker(
        self,
    ) -> None:
        """
        Test that when the outbound federation request fails with a network related
        error, a sensible error makes its way back to the main process.
        """
        # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
        # so we can act like some remote server responding to requests
        mock_client_on_federation_sender = Mock()
        mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
        mock_client_on_federation_sender.agent = mock_agent_on_federation_sender

        # Create the `federation_sender` worker
        self.make_worker_hs(
            "synapse.app.generic_worker",
            {"worker_name": "federation_sender"},
            federation_http_client=mock_client_on_federation_sender,
        )

        # Fake `remoteserv:8008` responding to requests
        mock_agent_on_federation_sender.request.side_effect = (
            lambda *args, **kwargs: defer.fail(ResponseNeverReceived("fake error"))
        )

        # This federation request from the main process should be proxied through the
        # `federation_sender` worker off to the remote server
        test_request_from_main_process_d = defer.ensureDeferred(
            self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar")
        )

        # Pump the reactor so our deferred goes through the motions. We pump with 10
        # seconds (0.1 * 100) so the `MatrixFederationHttpClient` runs out of retries
        # and finally passes along the error response.
        self.pump(0.1)

        # Make sure that the request was proxied through the `federation_sender` worker
        mock_agent_on_federation_sender.request.assert_called_with(
            b"GET",
            b"matrix-federation://remoteserv:8008/foo/bar",
            headers=ANY,
            bodyProducer=ANY,
        )

        # Make sure we get some sort of error back on the main worker
        failure_res = self.failureResultOf(test_request_from_main_process_d)
        self.assertIsInstance(failure_res.value, RequestSendFailed)
        self.assertIsInstance(failure_res.value.inner_exception, HttpResponseException)
        self.assertEqual(failure_res.value.inner_exception.code, 502)

    @override_config(
        {
            "outbound_federation_restricted_to": ["federation_sender"],
            "worker_replication_secret": "secret",
        }
    )
    def test_proxy_requests_and_discards_hop_by_hop_headers(self) -> None:
        """
        Test to make sure hop-by-hop headers and additional headers defined in the
        `Connection` header are discarded when proxying requests
        """
        # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
        # so we can act like some remote server responding to requests
        mock_client_on_federation_sender = Mock()
        mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
        mock_client_on_federation_sender.agent = mock_agent_on_federation_sender

        # Create the `federation_sender` worker
        self.make_worker_hs(
            "synapse.app.generic_worker",
            {"worker_name": "federation_sender"},
            federation_http_client=mock_client_on_federation_sender,
        )

        # Fake `remoteserv:8008` responding to requests
        mock_agent_on_federation_sender.request.side_effect = lambda *args, **kwargs: defer.succeed(
            FakeResponse(
                code=200,
                body=b'{"foo": "bar"}',
                headers=Headers(
                    {
                        "Content-Type": ["application/json"],
                        "Connection": ["close, X-Foo, X-Bar"],
                        # Should be removed because it's defined in the `Connection` header
                        "X-Foo": ["foo"],
                        "X-Bar": ["bar"],
                        # Should be removed because it's a hop-by-hop header
                        "Proxy-Authorization": "abcdef",
                    }
                ),
            )
        )

        # This federation request from the main process should be proxied through the
        # `federation_sender` worker off to the remote server
        test_request_from_main_process_d = defer.ensureDeferred(
            self.hs.get_federation_http_client().get_json_with_headers(
                "remoteserv:8008", "foo/bar"
            )
        )

        # Pump the reactor so our deferred goes through the motions
        self.pump()

        # Make sure that the request was proxied through the `federation_sender` worker
        mock_agent_on_federation_sender.request.assert_called_once_with(
            b"GET",
            b"matrix-federation://remoteserv:8008/foo/bar",
            headers=ANY,
            bodyProducer=ANY,
        )

        res, headers = self.successResultOf(test_request_from_main_process_d)
        header_names = set(headers.keys())

        # Make sure the response does not include the hop-by-hop headers
        self.assertNotIn(b"X-Foo", header_names)
        self.assertNotIn(b"X-Bar", header_names)
        self.assertNotIn(b"Proxy-Authorization", header_names)
        # Make sure the response is as expected back on the main worker
        self.assertEqual(res, {"foo": "bar"})

    @override_config(
        {
            "outbound_federation_restricted_to": ["federation_sender"],
            # `worker_replication_secret` is set here so that the test setup is able to pass
            # but the actual homeserver creation test is in the test body below
            "worker_replication_secret": "secret",
        }
    )
    def test_not_able_to_proxy_requests_through_federation_sender_worker_when_no_secret_configured(
        self,
    ) -> None:
        """
        Test that we aren't able to proxy any outbound federation requests when
        `worker_replication_secret` is not configured.
        """
        with self.assertRaises(ConfigError):
            # Create the `federation_sender` worker
            self.make_worker_hs(
                "synapse.app.generic_worker",
                {
                    "worker_name": "federation_sender",
                    # Test that we aren't able to proxy any outbound federation requests
                    # when `worker_replication_secret` is not configured.
                    "worker_replication_secret": None,
                },
            )

    @override_config(
        {
            "outbound_federation_restricted_to": ["federation_sender"],
            "worker_replication_secret": "secret",
        }
    )
    def test_not_able_to_proxy_requests_through_federation_sender_worker_when_wrong_auth_given(
        self,
    ) -> None:
        """
        Test that we aren't able to proxy any outbound federation requests when the
        wrong authorization is given.
        """
        # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
        # so we can act like some remote server responding to requests
        mock_client_on_federation_sender = Mock()
        mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
        mock_client_on_federation_sender.agent = mock_agent_on_federation_sender

        # Create the `federation_sender` worker
        self.make_worker_hs(
            "synapse.app.generic_worker",
            {
                "worker_name": "federation_sender",
                # Test that we aren't able to proxy any outbound federation requests
                # when `worker_replication_secret` is wrong.
                "worker_replication_secret": "wrong",
            },
            federation_http_client=mock_client_on_federation_sender,
        )

        # This federation request from the main process should be proxied through the
        # `federation_sender` worker off but will fail here because it's using the wrong
        # authorization.
        test_request_from_main_process_d = defer.ensureDeferred(
            self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar")
        )

        # Pump the reactor so our deferred goes through the motions. We pump with 10
        # seconds (0.1 * 100) so the `MatrixFederationHttpClient` runs out of retries
        # and finally passes along the error response.
        self.pump(0.1)

        # Make sure that the request was *NOT* proxied through the `federation_sender`
        # worker
        mock_agent_on_federation_sender.request.assert_not_called()

        failure_res = self.failureResultOf(test_request_from_main_process_d)
        self.assertIsInstance(failure_res.value, HttpResponseException)
        self.assertEqual(failure_res.value.code, 401)