2015-01-26 05:45:24 -05:00
|
|
|
# -*- coding: utf-8 -*-
|
2016-01-06 23:26:29 -05:00
|
|
|
# Copyright 2015, 2016 OpenMarket Ltd
|
2015-01-26 05:45:24 -05:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
|
2017-12-30 13:40:19 -05:00
|
|
|
import copy
|
|
|
|
import itertools
|
|
|
|
import logging
|
|
|
|
import random
|
|
|
|
|
2018-04-28 07:57:00 -04:00
|
|
|
from six.moves import range
|
|
|
|
|
2018-07-09 02:09:20 -04:00
|
|
|
from prometheus_client import Counter
|
|
|
|
|
2015-01-26 05:45:24 -05:00
|
|
|
from twisted.internet import defer
|
|
|
|
|
2018-08-06 10:15:19 -04:00
|
|
|
from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
|
2015-03-05 11:08:02 -05:00
|
|
|
from synapse.api.errors import (
|
2018-07-09 02:09:20 -04:00
|
|
|
CodeMessageException,
|
|
|
|
FederationDeniedError,
|
|
|
|
HttpResponseException,
|
|
|
|
SynapseError,
|
2015-03-05 11:08:02 -05:00
|
|
|
)
|
2017-12-30 13:40:19 -05:00
|
|
|
from synapse.events import builder
|
2018-07-09 02:09:20 -04:00
|
|
|
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
|
2017-12-30 13:40:19 -05:00
|
|
|
from synapse.util import logcontext, unwrapFirstError
|
|
|
|
from synapse.util.caches.expiringcache import ExpiringCache
|
2018-04-27 06:29:27 -04:00
|
|
|
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
|
2017-12-30 13:40:19 -05:00
|
|
|
from synapse.util.logutils import log_function
|
2017-03-22 20:12:21 -04:00
|
|
|
from synapse.util.retryutils import NotRetryingDestination
|
2015-02-17 12:20:56 -05:00
|
|
|
|
2018-05-21 20:47:37 -04:00
|
|
|
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)

# Prometheus counter of outbound federation queries, labelled by query type.
sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["type"])

# How long (in milliseconds) before we will retry asking a destination for a
# PDU after a failed attempt; see pdu_destination_tried / _clear_tried_cache.
PDU_RETRY_TIME_MS = 1 * 60 * 1000
|
|
|
|
|
|
|
|
|
2018-08-01 06:24:19 -04:00
|
|
|
class InvalidResponseError(RuntimeError):
    """Raised by ``_try_destination_list`` callbacks to signal that a remote
    server returned a response we could not parse.
    """
|
|
|
|
|
|
|
|
|
2015-02-03 09:58:30 -05:00
|
|
|
class FederationClient(FederationBase):
|
2016-06-15 10:12:59 -04:00
|
|
|
def __init__(self, hs):
|
|
|
|
super(FederationClient, self).__init__(hs)
|
2015-02-16 13:02:39 -05:00
|
|
|
|
2016-08-10 06:31:46 -04:00
|
|
|
self.pdu_destination_tried = {}
|
|
|
|
self._clock.looping_call(
|
|
|
|
self._clear_tried_cache, 60 * 1000,
|
|
|
|
)
|
2016-08-26 09:54:30 -04:00
|
|
|
self.state = hs.get_state_handler()
|
2018-03-12 10:07:39 -04:00
|
|
|
self.transport_layer = hs.get_federation_transport_client()
|
2016-08-10 06:31:46 -04:00
|
|
|
|
2018-09-21 09:19:46 -04:00
|
|
|
self._get_pdu_cache = ExpiringCache(
|
|
|
|
cache_name="get_pdu_cache",
|
|
|
|
clock=self._clock,
|
|
|
|
max_len=1000,
|
|
|
|
expiry_ms=120 * 1000,
|
|
|
|
reset_expiry_on_get=False,
|
|
|
|
)
|
|
|
|
|
2016-08-10 06:31:46 -04:00
|
|
|
def _clear_tried_cache(self):
|
|
|
|
"""Clear pdu_destination_tried cache"""
|
|
|
|
now = self._clock.time_msec()
|
|
|
|
|
|
|
|
old_dict = self.pdu_destination_tried
|
|
|
|
self.pdu_destination_tried = {}
|
|
|
|
|
|
|
|
for event_id, destination_dict in old_dict.items():
|
|
|
|
destination_dict = {
|
|
|
|
dest: time
|
|
|
|
for dest, time in destination_dict.items()
|
|
|
|
if time + PDU_RETRY_TIME_MS > now
|
|
|
|
}
|
|
|
|
if destination_dict:
|
|
|
|
self.pdu_destination_tried[event_id] = destination_dict
|
|
|
|
|
2015-01-26 05:45:24 -05:00
|
|
|
@log_function
|
|
|
|
def make_query(self, destination, query_type, args,
|
2017-03-23 07:10:36 -04:00
|
|
|
retry_on_dns_fail=False, ignore_backoff=False):
|
2015-01-26 05:45:24 -05:00
|
|
|
"""Sends a federation Query to a remote homeserver of the given type
|
|
|
|
and arguments.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
destination (str): Domain name of the remote homeserver
|
|
|
|
query_type (str): Category of the query type; should match the
|
|
|
|
handler name used in register_query_handler().
|
|
|
|
args (dict): Mapping of strings to strings containing the details
|
|
|
|
of the query request.
|
2017-03-23 07:10:36 -04:00
|
|
|
ignore_backoff (bool): true to ignore the historical backoff data
|
|
|
|
and try the request anyway.
|
2015-01-26 05:45:24 -05:00
|
|
|
|
|
|
|
Returns:
|
|
|
|
a Deferred which will eventually yield a JSON object from the
|
|
|
|
response
|
|
|
|
"""
|
2018-05-21 20:47:37 -04:00
|
|
|
sent_queries_counter.labels(query_type).inc()
|
2015-03-10 11:29:22 -04:00
|
|
|
|
2015-01-26 05:45:24 -05:00
|
|
|
return self.transport_layer.make_query(
|
2017-03-23 07:10:36 -04:00
|
|
|
destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail,
|
|
|
|
ignore_backoff=ignore_backoff,
|
2015-01-26 05:45:24 -05:00
|
|
|
)
|
|
|
|
|
2015-07-23 11:03:38 -04:00
|
|
|
@log_function
|
2016-09-12 13:17:09 -04:00
|
|
|
def query_client_keys(self, destination, content, timeout):
|
2015-07-23 11:03:38 -04:00
|
|
|
"""Query device keys for a device hosted on a remote server.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
destination (str): Domain name of the remote homeserver
|
|
|
|
content (dict): The query content.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
a Deferred which will eventually yield a JSON object from the
|
|
|
|
response
|
|
|
|
"""
|
2018-05-21 20:47:37 -04:00
|
|
|
sent_queries_counter.labels("client_device_keys").inc()
|
2016-09-12 13:17:09 -04:00
|
|
|
return self.transport_layer.query_client_keys(
|
|
|
|
destination, content, timeout
|
|
|
|
)
|
2015-07-23 11:03:38 -04:00
|
|
|
|
2017-01-26 11:06:54 -05:00
|
|
|
@log_function
|
|
|
|
def query_user_devices(self, destination, user_id, timeout=30000):
|
|
|
|
"""Query the device keys for a list of user ids hosted on a remote
|
|
|
|
server.
|
|
|
|
"""
|
2018-05-21 20:47:37 -04:00
|
|
|
sent_queries_counter.labels("user_devices").inc()
|
2017-01-26 11:06:54 -05:00
|
|
|
return self.transport_layer.query_user_devices(
|
|
|
|
destination, user_id, timeout
|
|
|
|
)
|
|
|
|
|
2015-07-23 11:03:38 -04:00
|
|
|
@log_function
|
2016-09-12 13:17:09 -04:00
|
|
|
def claim_client_keys(self, destination, content, timeout):
|
2015-07-23 11:03:38 -04:00
|
|
|
"""Claims one-time keys for a device hosted on a remote server.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
destination (str): Domain name of the remote homeserver
|
|
|
|
content (dict): The query content.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
a Deferred which will eventually yield a JSON object from the
|
|
|
|
response
|
|
|
|
"""
|
2018-05-21 20:47:37 -04:00
|
|
|
sent_queries_counter.labels("client_one_time_keys").inc()
|
2016-09-12 13:17:09 -04:00
|
|
|
return self.transport_layer.claim_client_keys(
|
|
|
|
destination, content, timeout
|
|
|
|
)
|
2015-07-23 11:03:38 -04:00
|
|
|
|
2015-01-26 05:45:24 -05:00
|
|
|
    @defer.inlineCallbacks
    @log_function
    def backfill(self, dest, context, limit, extremities):
        """Requests some more historic PDUs for the given context from the
        given destination server.

        Args:
            dest (str): The remote home server to ask.
            context (str): The context to backfill.
            limit (int): The maximum number of PDUs to return.
            extremities (list): List of PDU id and origins of the first pdus
                we have seen from the context

        Returns:
            Deferred: Results in the received PDUs.
        """
        logger.debug("backfill extrem=%s", extremities)

        # If there are no extremeties then we've (probably) reached the start.
        if not extremities:
            return

        transaction_data = yield self.transport_layer.backfill(
            dest, context, extremities, limit)

        logger.debug("backfill transaction_data=%s", repr(transaction_data))

        # Deserialise the returned PDUs; these are not outliers since they
        # extend our existing graph backwards.
        pdus = [
            event_from_pdu_json(p, outlier=False)
            for p in transaction_data["pdus"]
        ]

        # FIXME: We should handle signature failures more gracefully.
        # Run the signature/hash checks concurrently; make_deferred_yieldable
        # presumably keeps the log context sane while we wait — see
        # synapse.util.logcontext for the details.
        pdus[:] = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            self._check_sigs_and_hashes(pdus),
            consumeErrors=True,
        ).addErrback(unwrapFirstError))

        defer.returnValue(pdus)
|
|
|
|
|
|
|
|
    @defer.inlineCallbacks
    @log_function
    def get_pdu(self, destinations, event_id, outlier=False, timeout=None):
        """Requests the PDU with given origin and ID from the remote home
        servers.

        Will attempt to get the PDU from each destination in the list until
        one succeeds.

        This will persist the PDU locally upon receipt.

        Args:
            destinations (list): Which home servers to query
            event_id (str): event to fetch
            outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
                it's from an arbitrary point in the context as opposed to part
                of the current block of PDUs. Defaults to `False`
            timeout (int): How long to try (in ms) each destination for before
                moving to the next destination. None indicates no timeout.

        Returns:
            Deferred: Results in the requested PDU.
        """

        # TODO: Rate limit the number of times we try and get the same event.

        # Serve from the short-lived cache if we fetched this event recently.
        ev = self._get_pdu_cache.get(event_id)
        if ev:
            defer.returnValue(ev)

        # Per-event record of when we last tried each destination, so we
        # don't hammer a server that just failed for this event.
        pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})

        signed_pdu = None
        for destination in destinations:
            now = self._clock.time_msec()
            last_attempt = pdu_attempts.get(destination, 0)
            # Skip destinations tried within the retry window.
            if last_attempt + PDU_RETRY_TIME_MS > now:
                continue

            try:
                transaction_data = yield self.transport_layer.get_event(
                    destination, event_id, timeout=timeout,
                )

                logger.debug("transaction_data %r", transaction_data)

                pdu_list = [
                    event_from_pdu_json(p, outlier=outlier)
                    for p in transaction_data["pdus"]
                ]

                if pdu_list and pdu_list[0]:
                    pdu = pdu_list[0]

                    # Check signatures are correct.
                    signed_pdu = yield self._check_sigs_and_hash(pdu)

                    break

                # The response contained no usable PDU: record the attempt so
                # we back off from this destination for a while.
                pdu_attempts[destination] = now

            except SynapseError as e:
                # NOTE: no back-off recorded here — we fall through and try
                # the next destination.
                logger.info(
                    "Failed to get PDU %s from %s because %s",
                    event_id, destination, e,
                )
            except NotRetryingDestination as e:
                logger.info(str(e))
                continue
            except FederationDeniedError as e:
                logger.info(str(e))
                continue
            except Exception as e:
                # Unexpected failure: record the attempt and move on.
                pdu_attempts[destination] = now

                logger.info(
                    "Failed to get PDU %s from %s because %s",
                    event_id, destination, e,
                )
                continue

        # Cache a successfully verified PDU for subsequent callers.
        if signed_pdu:
            self._get_pdu_cache[event_id] = signed_pdu

        # May be None if every destination failed.
        defer.returnValue(signed_pdu)
|
2015-01-26 05:45:24 -05:00
|
|
|
|
|
|
|
    @defer.inlineCallbacks
    @log_function
    def get_state_for_room(self, destination, room_id, event_id):
        """Requests all of the room state at a given event from a remote home server.

        Args:
            destination (str): The remote homeserver to query for the state.
            room_id (str): The id of the room we're interested in.
            event_id (str): The id of the event we want the state at.

        Returns:
            Deferred[Tuple[List[EventBase], List[EventBase]]]:
                A list of events in the state, and a list of events in the auth chain
                for the given event.
        """
        try:
            # First we try and ask for just the IDs, as thats far quicker if
            # we have most of the state and auth_chain already.
            # However, this may 404 if the other side has an old synapse.
            result = yield self.transport_layer.get_room_state_ids(
                destination, room_id, event_id=event_id,
            )

            state_event_ids = result["pdu_ids"]
            auth_event_ids = result.get("auth_chain_ids", [])

            # Fetch any events we don't already have locally.
            fetched_events, failed_to_fetch = yield self.get_events(
                [destination], room_id, set(state_event_ids + auth_event_ids)
            )

            if failed_to_fetch:
                logger.warn("Failed to get %r", failed_to_fetch)

            event_map = {
                ev.event_id: ev for ev in fetched_events
            }

            # Events we failed to fetch are silently dropped from the result.
            pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
            auth_chain = [
                event_map[e_id] for e_id in auth_event_ids if e_id in event_map
            ]

            auth_chain.sort(key=lambda e: e.depth)

            defer.returnValue((pdus, auth_chain))
        except HttpResponseException as e:
            if e.code == 400 or e.code == 404:
                logger.info("Failed to use get_room_state_ids API, falling back")
            else:
                raise e

        # Fallback for older remote servers: ask for the full state events
        # rather than just the IDs.
        result = yield self.transport_layer.get_room_state(
            destination, room_id, event_id=event_id,
        )

        pdus = [
            event_from_pdu_json(p, outlier=True) for p in result["pdus"]
        ]

        auth_chain = [
            event_from_pdu_json(p, outlier=True)
            for p in result.get("auth_chain", [])
        ]

        # Events already in our store don't need re-verifying.
        seen_events = yield self.store.get_events([
            ev.event_id for ev in itertools.chain(pdus, auth_chain)
        ])

        signed_pdus = yield self._check_sigs_and_hash_and_fetch(
            destination,
            [p for p in pdus if p.event_id not in seen_events],
            outlier=True
        )
        signed_pdus.extend(
            seen_events[p.event_id] for p in pdus if p.event_id in seen_events
        )

        signed_auth = yield self._check_sigs_and_hash_and_fetch(
            destination,
            [p for p in auth_chain if p.event_id not in seen_events],
            outlier=True
        )
        signed_auth.extend(
            seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
        )

        signed_auth.sort(key=lambda e: e.depth)

        defer.returnValue((signed_pdus, signed_auth))
|
2015-01-26 05:45:24 -05:00
|
|
|
|
2016-08-03 09:47:37 -04:00
|
|
|
    @defer.inlineCallbacks
    def get_events(self, destinations, room_id, event_ids, return_local=True):
        """Fetch events from some remote destinations, checking if we already
        have them.

        Args:
            destinations (list)
            room_id (str)
            event_ids (list)
            return_local (bool): Whether to include events we already have in
                the DB in the returned list of events

        Returns:
            Deferred: A deferred resolving to a 2-tuple where the first is a list of
            events and the second is a list of event ids that we failed to fetch.
        """
        if return_local:
            # Load the local copies and seed the result list with them.
            seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
            signed_events = list(seen_events.values())
        else:
            # Only need to know *which* events we have, not their content.
            seen_events = yield self.store.have_seen_events(event_ids)
            signed_events = []

        failed_to_fetch = set()

        # Work out which events we still need to go to the network for.
        missing_events = set(event_ids)
        for k in seen_events:
            missing_events.discard(k)

        if not missing_events:
            defer.returnValue((signed_events, failed_to_fetch))

        def random_server_list():
            # Fresh shuffled copy per call, to spread load across servers.
            srvs = list(destinations)
            random.shuffle(srvs)
            return srvs

        # Fetch in batches so we don't fire off an unbounded number of
        # concurrent requests.
        batch_size = 20
        missing_events = list(missing_events)
        for i in range(0, len(missing_events), batch_size):
            batch = set(missing_events[i:i + batch_size])

            deferreds = [
                run_in_background(
                    self.get_pdu,
                    destinations=random_server_list(),
                    event_id=e_id,
                )
                for e_id in batch
            ]

            res = yield make_deferred_yieldable(
                defer.DeferredList(deferreds, consumeErrors=True)
            )
            for success, result in res:
                if success and result:
                    signed_events.append(result)
                    batch.discard(result.event_id)

            # We removed all events we successfully fetched from `batch`
            failed_to_fetch.update(batch)

        defer.returnValue((signed_events, failed_to_fetch))
|
|
|
|
|
2015-01-26 05:45:24 -05:00
|
|
|
@defer.inlineCallbacks
|
|
|
|
@log_function
|
|
|
|
def get_event_auth(self, destination, room_id, event_id):
|
|
|
|
res = yield self.transport_layer.get_event_auth(
|
|
|
|
destination, room_id, event_id,
|
|
|
|
)
|
|
|
|
|
|
|
|
auth_chain = [
|
2017-12-30 13:40:19 -05:00
|
|
|
event_from_pdu_json(p, outlier=True)
|
2015-01-26 05:45:24 -05:00
|
|
|
for p in res["auth_chain"]
|
|
|
|
]
|
|
|
|
|
2015-02-02 11:56:01 -05:00
|
|
|
signed_auth = yield self._check_sigs_and_hash_and_fetch(
|
2015-02-02 12:06:37 -05:00
|
|
|
destination, auth_chain, outlier=True
|
2015-02-02 11:56:01 -05:00
|
|
|
)
|
2015-01-26 09:33:11 -05:00
|
|
|
|
2015-02-02 11:56:01 -05:00
|
|
|
signed_auth.sort(key=lambda e: e.depth)
|
2015-01-26 05:45:24 -05:00
|
|
|
|
2015-02-02 11:56:01 -05:00
|
|
|
defer.returnValue(signed_auth)
|
2015-01-26 05:45:24 -05:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
|
2018-08-01 06:24:19 -04:00
|
|
|
def _try_destination_list(self, description, destinations, callback):
|
|
|
|
"""Try an operation on a series of servers, until it succeeds
|
|
|
|
|
|
|
|
Args:
|
|
|
|
description (unicode): description of the operation we're doing, for logging
|
|
|
|
|
|
|
|
destinations (Iterable[unicode]): list of server_names to try
|
|
|
|
|
|
|
|
callback (callable): Function to run for each server. Passed a single
|
|
|
|
argument: the server_name to try. May return a deferred.
|
|
|
|
|
|
|
|
If the callback raises a CodeMessageException with a 300/400 code,
|
|
|
|
attempts to perform the operation stop immediately and the exception is
|
|
|
|
reraised.
|
|
|
|
|
|
|
|
Otherwise, if the callback raises an Exception the error is logged and the
|
|
|
|
next server tried. Normally the stacktrace is logged but this is
|
|
|
|
suppressed if the exception is an InvalidResponseError.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
The [Deferred] result of callback, if it succeeds
|
|
|
|
|
|
|
|
Raises:
|
2018-08-01 08:47:07 -04:00
|
|
|
SynapseError if the chosen remote server returns a 300/400 code.
|
2018-08-01 06:24:19 -04:00
|
|
|
|
|
|
|
RuntimeError if no servers were reachable.
|
|
|
|
"""
|
|
|
|
for destination in destinations:
|
|
|
|
if destination == self.server_name:
|
|
|
|
continue
|
|
|
|
|
|
|
|
try:
|
|
|
|
res = yield callback(destination)
|
|
|
|
defer.returnValue(res)
|
|
|
|
except InvalidResponseError as e:
|
|
|
|
logger.warn(
|
|
|
|
"Failed to %s via %s: %s",
|
|
|
|
description, destination, e,
|
|
|
|
)
|
2018-08-01 08:47:07 -04:00
|
|
|
except HttpResponseException as e:
|
2018-08-01 06:24:19 -04:00
|
|
|
if not 500 <= e.code < 600:
|
2018-08-01 09:58:16 -04:00
|
|
|
raise e.to_synapse_error()
|
2018-08-01 06:24:19 -04:00
|
|
|
else:
|
|
|
|
logger.warn(
|
|
|
|
"Failed to %s via %s: %i %s",
|
2018-09-12 09:23:32 -04:00
|
|
|
description, destination, e.code, e.args[0],
|
2018-08-01 06:24:19 -04:00
|
|
|
)
|
|
|
|
except Exception:
|
|
|
|
logger.warn(
|
|
|
|
"Failed to %s via %s",
|
|
|
|
description, destination, exc_info=1,
|
|
|
|
)
|
|
|
|
|
2018-08-06 10:15:19 -04:00
|
|
|
raise RuntimeError("Failed to %s via any server" % (description, ))
|
2018-08-01 06:24:19 -04:00
|
|
|
|
2015-11-12 11:19:55 -05:00
|
|
|
    def make_membership_event(self, destinations, room_id, user_id, membership,
                              content, params):
        """
        Creates an m.room.member event, with context, without participating in the room.

        Does so by asking one of the already participating servers to create an
        event with proper context.

        Note that this does not append any events to any graphs.

        Args:
            destinations (str): Candidate homeservers which are probably
                participating in the room.
            room_id (str): The room in which the event will happen.
            user_id (str): The user whose membership is being evented.
            membership (str): The "membership" property of the event. Must be
                one of "join" or "leave".
            content (dict): Any additional data to put into the content field
                of the event.
            params (dict[str, str|Iterable[str]]): Query parameters to include in the
                request.
        Return:
            Deferred: resolves to a tuple of (origin (str), event (object))
            where origin is the remote homeserver which generated the event.

            Fails with a ``SynapseError`` if the chosen remote server
            returns a 300/400 code.

            Fails with a ``RuntimeError`` if no servers were reachable.
        """
        # Guard against programming errors: only join/leave make sense here.
        valid_memberships = {Membership.JOIN, Membership.LEAVE}
        if membership not in valid_memberships:
            raise RuntimeError(
                "make_membership_event called with membership='%s', must be one of %s" %
                (membership, ",".join(valid_memberships))
            )

        @defer.inlineCallbacks
        def send_request(destination):
            ret = yield self.transport_layer.make_membership_event(
                destination, room_id, user_id, membership, params,
            )

            # Validate the response shape before we touch it; a bad 'event'
            # field makes _try_destination_list move on to the next server.
            pdu_dict = ret.get("event", None)
            if not isinstance(pdu_dict, dict):
                raise InvalidResponseError("Bad 'event' field in response")

            logger.debug("Got response to make_%s: %s", membership, pdu_dict)

            # Merge caller-supplied content into the remote's proto-event.
            pdu_dict["content"].update(content)

            # The protoevent received over the JSON wire may not have all
            # the required fields. Lets just gloss over that because
            # there's some we never care about
            if "prev_state" not in pdu_dict:
                pdu_dict["prev_state"] = []

            ev = builder.EventBuilder(pdu_dict)

            defer.returnValue(
                (destination, ev)
            )

        return self._try_destination_list(
            "make_" + membership, destinations, send_request,
        )
|
2015-01-26 05:45:24 -05:00
|
|
|
|
2015-02-04 11:28:12 -05:00
|
|
|
    def send_join(self, destinations, pdu):
        """Sends a join event to one of a list of homeservers.

        Doing so will cause the remote server to add the event to the graph,
        and send the event out to the rest of the federation.

        Args:
            destinations (str): Candidate homeservers which are probably
                participating in the room.
            pdu (BaseEvent): event to be sent

        Return:
            Deferred: resolves to a dict with members ``origin`` (a string
            giving the server the event was sent to, ``state`` (?) and
            ``auth_chain``.

            Fails with a ``SynapseError`` if the chosen remote server
            returns a 300/400 code.

            Fails with a ``RuntimeError`` if no servers were reachable.
        """

        def check_authchain_validity(signed_auth_chain):
            # Find the room's create event in the auth chain; for/else raises
            # if no m.room.create event is present at all.
            for e in signed_auth_chain:
                if e.type == EventTypes.Create:
                    create_event = e
                    break
            else:
                raise InvalidResponseError(
                    "no %s in auth chain" % (EventTypes.Create,),
                )

            # the room version should be sane.
            room_version = create_event.content.get("room_version", "1")
            if room_version not in KNOWN_ROOM_VERSIONS:
                # This shouldn't be possible, because the remote server should have
                # rejected the join attempt during make_join.
                raise InvalidResponseError(
                    "room appears to have unsupported version %s" % (
                        room_version,
                    ))

        @defer.inlineCallbacks
        def send_request(destination):
            time_now = self._clock.time_msec()
            _, content = yield self.transport_layer.send_join(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content=pdu.get_pdu_json(time_now),
            )

            logger.debug("Got content: %s", content)

            # Deserialise the returned room state and auth chain as outliers.
            state = [
                event_from_pdu_json(p, outlier=True)
                for p in content.get("state", [])
            ]

            auth_chain = [
                event_from_pdu_json(p, outlier=True)
                for p in content.get("auth_chain", [])
            ]

            # De-duplicate by event_id before verifying signatures/hashes.
            pdus = {
                p.event_id: p
                for p in itertools.chain(state, auth_chain)
            }

            valid_pdus = yield self._check_sigs_and_hash_and_fetch(
                destination, list(pdus.values()),
                outlier=True,
            )

            valid_pdus_map = {
                p.event_id: p
                for p in valid_pdus
            }

            # NB: We *need* to copy to ensure that we don't have multiple
            # references being passed on, as that causes... issues.
            signed_state = [
                copy.copy(valid_pdus_map[p.event_id])
                for p in state
                if p.event_id in valid_pdus_map
            ]

            signed_auth = [
                valid_pdus_map[p.event_id]
                for p in auth_chain
                if p.event_id in valid_pdus_map
            ]

            # NB: We *need* to copy to ensure that we don't have multiple
            # references being passed on, as that causes... issues.
            for s in signed_state:
                s.internal_metadata = copy.deepcopy(s.internal_metadata)

            check_authchain_validity(signed_auth)

            defer.returnValue({
                "state": signed_state,
                "auth_chain": signed_auth,
                "origin": destination,
            })
        return self._try_destination_list("send_join", destinations, send_request)
|
2015-01-26 05:45:24 -05:00
|
|
|
|
|
|
|
    @defer.inlineCallbacks
    def send_invite(self, destination, room_id, event_id, pdu):
        """Send an invite event to a remote homeserver for it to sign.

        Args:
            destination (str): homeserver to send the invite to.
            room_id (str): room the invite is for.
            event_id (str): id of the invite event.
            pdu: the invite event to send.

        Returns:
            Deferred: resolves to the signature-checked event returned by the
            remote server.
        """
        time_now = self._clock.time_msec()
        try:
            code, content = yield self.transport_layer.send_invite(
                destination=destination,
                room_id=room_id,
                event_id=event_id,
                content=pdu.get_pdu_json(time_now),
            )
        except HttpResponseException as e:
            # 403 means the remote refused the invite: surface it to the
            # caller as a SynapseError; anything else propagates as-is.
            if e.code == 403:
                raise e.to_synapse_error()
            raise

        pdu_dict = content["event"]

        logger.debug("Got response to send_invite: %s", pdu_dict)

        pdu = event_from_pdu_json(pdu_dict)

        # Check signatures are correct.
        pdu = yield self._check_sigs_and_hash(pdu)

        # FIXME: We should handle signature failures more gracefully.

        defer.returnValue(pdu)
|
2015-01-26 05:45:24 -05:00
|
|
|
|
2015-10-20 06:58:58 -04:00
|
|
|
def send_leave(self, destinations, pdu):
|
2017-04-20 19:46:54 -04:00
|
|
|
"""Sends a leave event to one of a list of homeservers.
|
|
|
|
|
|
|
|
Doing so will cause the remote server to add the event to the graph,
|
|
|
|
and send the event out to the rest of the federation.
|
|
|
|
|
|
|
|
This is mostly useful to reject received invites.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
destinations (str): Candidate homeservers which are probably
|
|
|
|
participating in the room.
|
|
|
|
pdu (BaseEvent): event to be sent
|
|
|
|
|
|
|
|
Return:
|
|
|
|
Deferred: resolves to None.
|
|
|
|
|
2018-08-01 08:47:07 -04:00
|
|
|
Fails with a ``SynapseError`` if the chosen remote server
|
2018-08-01 06:24:19 -04:00
|
|
|
returns a 300/400 code.
|
2017-04-20 19:46:54 -04:00
|
|
|
|
|
|
|
Fails with a ``RuntimeError`` if no servers were reachable.
|
|
|
|
"""
|
2018-08-01 06:24:19 -04:00
|
|
|
@defer.inlineCallbacks
|
|
|
|
def send_request(destination):
|
|
|
|
time_now = self._clock.time_msec()
|
|
|
|
_, content = yield self.transport_layer.send_leave(
|
|
|
|
destination=destination,
|
|
|
|
room_id=pdu.room_id,
|
|
|
|
event_id=pdu.event_id,
|
|
|
|
content=pdu.get_pdu_json(time_now),
|
|
|
|
)
|
2015-10-20 06:58:58 -04:00
|
|
|
|
2018-08-01 06:24:19 -04:00
|
|
|
logger.debug("Got content: %s", content)
|
|
|
|
defer.returnValue(None)
|
2015-10-20 06:58:58 -04:00
|
|
|
|
2018-08-01 06:24:19 -04:00
|
|
|
return self._try_destination_list("send_leave", destinations, send_request)
|
2015-10-20 06:58:58 -04:00
|
|
|
|
2016-09-16 05:24:15 -04:00
|
|
|
def get_public_rooms(self, destination, limit=None, since_token=None,
|
2016-12-06 05:43:48 -05:00
|
|
|
search_filter=None, include_all_networks=False,
|
|
|
|
third_party_instance_id=None):
|
2016-09-15 05:36:19 -04:00
|
|
|
if destination == self.server_name:
|
|
|
|
return
|
2016-05-31 12:20:07 -04:00
|
|
|
|
2016-09-16 05:24:15 -04:00
|
|
|
return self.transport_layer.get_public_rooms(
|
2016-12-06 05:43:48 -05:00
|
|
|
destination, limit, since_token, search_filter,
|
|
|
|
include_all_networks=include_all_networks,
|
|
|
|
third_party_instance_id=third_party_instance_id,
|
2016-09-16 05:24:15 -04:00
|
|
|
)
|
2016-05-31 12:20:07 -04:00
|
|
|
|
2015-01-28 11:16:53 -05:00
|
|
|
    @defer.inlineCallbacks
    def query_auth(self, destination, room_id, event_id, local_auth):
        """Send our auth chain for an event to a remote server and get back
        its view of the differences.

        Args:
            destination (str): homeserver to query
            room_id (str): room the event belongs to
            event_id (str): event whose auth chain is being compared
            local_auth (list): our auth chain events for the event

        Returns:
            Deferred[dict]: with keys:
                ``auth_chain``: the remote's auth events, signature-checked
                    and sorted by depth
                ``rejects``: taken verbatim from the remote response
                ``missing``: taken verbatim from the remote response
        """
        time_now = self._clock.time_msec()

        send_content = {
            "auth_chain": [e.get_pdu_json(time_now) for e in local_auth],
        }

        code, content = yield self.transport_layer.send_query_auth(
            destination=destination,
            room_id=room_id,
            event_id=event_id,
            content=send_content,
        )

        auth_chain = [
            event_from_pdu_json(e)
            for e in content["auth_chain"]
        ]

        # Check signatures/hashes before trusting anything the remote sent.
        signed_auth = yield self._check_sigs_and_hash_and_fetch(
            destination, auth_chain, outlier=True
        )

        signed_auth.sort(key=lambda e: e.depth)

        ret = {
            "auth_chain": signed_auth,
            "rejects": content.get("rejects", []),
            "missing": content.get("missing", []),
        }

        defer.returnValue(ret)
|
|
|
|
|
2015-02-23 08:58:02 -05:00
|
|
|
    @defer.inlineCallbacks
    def get_missing_events(self, destination, room_id, earliest_events_ids,
                           latest_events, limit, min_depth, timeout):
        """Tries to fetch events we are missing. This is called when we receive
        an event without having received all of its ancestors.

        Args:
            destination (str)
            room_id (str)
            earliest_events_ids (list): List of event ids. Effectively the
                events we expected to receive, but haven't. `get_missing_events`
                should only return events that didn't happen before these.
            latest_events (list): List of events we have received that we don't
                have all previous events for.
            limit (int): Maximum number of events to return.
            min_depth (int): Minimum depth of events to return.
            timeout (int): Max time to wait in ms
        """
        try:
            content = yield self.transport_layer.get_missing_events(
                destination=destination,
                room_id=room_id,
                earliest_events=earliest_events_ids,
                latest_events=[e.event_id for e in latest_events],
                limit=limit,
                min_depth=min_depth,
                timeout=timeout,
            )

            events = [
                event_from_pdu_json(e)
                for e in content.get("events", [])
            ]

            # Check signatures/hashes before trusting anything the remote sent.
            signed_events = yield self._check_sigs_and_hash_and_fetch(
                destination, events, outlier=False
            )
        except HttpResponseException as e:
            if not e.code == 400:
                raise

            # We are probably hitting an old server that doesn't support
            # get_missing_events
            signed_events = []

        defer.returnValue(signed_events)
|
|
|
|
|
2015-11-05 11:43:19 -05:00
|
|
|
@defer.inlineCallbacks
|
|
|
|
def forward_third_party_invite(self, destinations, room_id, event_dict):
|
|
|
|
for destination in destinations:
|
|
|
|
if destination == self.server_name:
|
|
|
|
continue
|
|
|
|
|
|
|
|
try:
|
|
|
|
yield self.transport_layer.exchange_third_party_invite(
|
|
|
|
destination=destination,
|
|
|
|
room_id=room_id,
|
|
|
|
event_dict=event_dict,
|
|
|
|
)
|
|
|
|
defer.returnValue(None)
|
|
|
|
except CodeMessageException:
|
|
|
|
raise
|
|
|
|
except Exception as e:
|
|
|
|
logger.exception(
|
|
|
|
"Failed to send_third_party_invite via %s: %s",
|
2018-09-12 09:23:32 -04:00
|
|
|
destination, str(e)
|
2015-11-05 11:43:19 -05:00
|
|
|
)
|
|
|
|
|
|
|
|
raise RuntimeError("Failed to send to any server.")
|