2014-08-12 10:10:52 -04:00
|
|
|
# -*- coding: utf-8 -*-
|
2015-01-06 08:21:39 -05:00
|
|
|
# Copyright 2014, 2015 OpenMarket Ltd
|
2014-08-12 10:10:52 -04:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
2014-08-12 22:14:34 -04:00
|
|
|
|
2014-08-12 10:10:52 -04:00
|
|
|
"""Contains handlers for federation events."""
|
|
|
|
|
|
|
|
from ._base import BaseHandler
|
|
|
|
|
2014-11-26 11:06:20 -05:00
|
|
|
from synapse.api.errors import (
|
2015-05-12 05:35:45 -04:00
|
|
|
AuthError, FederationError, StoreError, CodeMessageException, SynapseError,
|
2014-11-26 11:06:20 -05:00
|
|
|
)
|
2015-01-28 11:16:53 -05:00
|
|
|
from synapse.api.constants import EventTypes, Membership, RejectedReason
|
2015-05-12 08:14:29 -04:00
|
|
|
from synapse.util import unwrapFirstError
|
2015-05-08 14:53:34 -04:00
|
|
|
from synapse.util.logcontext import PreserveLoggingContext
|
2014-08-12 10:10:52 -04:00
|
|
|
from synapse.util.logutils import log_function
|
2014-10-29 12:59:24 -04:00
|
|
|
from synapse.util.async import run_on_reactor
|
2015-02-11 10:44:28 -05:00
|
|
|
from synapse.util.frozenutils import unfreeze
|
2014-11-14 11:45:39 -05:00
|
|
|
from synapse.crypto.event_signing import (
|
2015-01-28 11:16:53 -05:00
|
|
|
compute_event_signature, add_hashes_and_signatures,
|
2014-11-14 11:45:39 -05:00
|
|
|
)
|
2015-01-23 06:47:15 -05:00
|
|
|
from synapse.types import UserID
|
2014-08-26 14:49:42 -04:00
|
|
|
|
2015-07-03 12:52:57 -04:00
|
|
|
from synapse.events.utils import prune_event
|
|
|
|
|
2015-05-12 05:35:45 -04:00
|
|
|
from synapse.util.retryutils import NotRetryingDestination
|
|
|
|
|
2014-11-04 09:14:02 -05:00
|
|
|
from twisted.internet import defer
|
2014-08-12 10:10:52 -04:00
|
|
|
|
2015-02-03 11:12:04 -05:00
|
|
|
import itertools
|
2014-08-12 10:10:52 -04:00
|
|
|
import logging
|
|
|
|
|
|
|
|
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
|
|
|
class FederationHandler(BaseHandler):
|
2014-08-26 14:49:42 -04:00
|
|
|
"""Handles events that originated from federation.
|
|
|
|
Responsible for:
|
|
|
|
a) handling received Pdus before handing them on as Events to the rest
|
|
|
|
of the home server (including auth and state conflict resolution)
|
|
|
|
b) converting events that were produced by local clients that may need
|
|
|
|
to be sent to remote home servers.
|
2014-11-12 11:20:21 -05:00
|
|
|
c) doing the necessary dances to invite remote users and join remote
|
|
|
|
rooms.
|
2014-08-26 14:49:42 -04:00
|
|
|
"""
|
2014-08-12 10:10:52 -04:00
|
|
|
|
2014-08-21 09:38:22 -04:00
|
|
|
def __init__(self, hs):
    """Initialise the handler from the HomeServer object.

    Args:
        hs: the HomeServer instance; all collaborators (datastore,
            replication layer, state handler, keyring, ...) are pulled
            from it.
    """
    super(FederationHandler, self).__init__(hs)

    # Fire our callback whenever a local user joins a room so that any
    # callers blocked in `waiting_for_join_list` can be resolved.
    self.distributor.observe(
        "user_joined_room",
        self._on_user_joined
    )

    # Maps (room_id/user) waits to the deferreds resolved on join.
    self.waiting_for_join_list = {}

    self.store = hs.get_datastore()
    self.replication_layer = hs.get_replication_layer()
    self.state_handler = hs.get_state_handler()
    # self.auth_handler = gs.get_auth_handler()  # NOTE(review): "gs" looks
    # like a typo for "hs"; left disabled as in the original.
    self.server_name = hs.hostname
    self.keyring = hs.get_keyring()

    self.lock_manager = hs.get_room_lock_manager()

    # Register ourselves as the recipient of PDUs from federation.
    self.replication_layer.set_handler(self)

    # When joining a room we need to queue any events for that room up
    self.room_queues = {}
|
|
|
|
|
2015-01-16 13:59:04 -05:00
|
|
|
def handle_new_event(self, event, destinations):
    """ Takes in an event from the client to server side, that has already
    been authed and handled by the state module, and sends it to any
    remote home servers that may be interested.

    Args:
        event: The event to send
        destinations: A list of destinations to send it to

    Returns:
        Deferred: Resolved when it has successfully been queued for
        processing.
    """
    # Hand straight off to the replication layer; it owns queueing and
    # per-destination retry behaviour.
    replication = self.replication_layer
    return replication.send_pdu(event, destinations)
|
2014-08-26 14:49:42 -04:00
|
|
|
|
|
|
|
@log_function
@defer.inlineCallbacks
def on_receive_pdu(self, origin, pdu, backfilled, state=None,
                   auth_chain=None):
    """ Called by the ReplicationLayer when we have a new pdu. We need to
    do auth checks and put it through the StateHandler.

    Args:
        origin: the server we received the pdu from.
        pdu: the event itself.
        backfilled: whether this arrived via a backfill request rather
            than a live push; suppresses notifier/membership side effects.
        state: optional room state given to us by the replication layer.
        auth_chain: optional auth chain given to us by the replication
            layer.
    """
    event = pdu

    logger.debug("Got event: %s", event.event_id)

    # If we are currently in the process of joining this room, then we
    # queue up events for later processing.
    if event.room_id in self.room_queues:
        self.room_queues[event.room_id].append((pdu, origin))
        return

    logger.debug("Processing event: %s", event.event_id)

    logger.debug("Event: %s", event)

    # FIXME (erikj): Awful hack to make the case where we are not currently
    # in the room work
    current_state = None
    is_in_room = yield self.auth.check_host_in_room(
        event.room_id,
        self.server_name
    )
    # Outliers are allowed through even when we're not in the room.
    if not is_in_room and not event.internal_metadata.is_outlier():
        logger.debug("Got event for room we're not in.")
        current_state = state

    # Collect every event id handed to us so we can ask the store, in one
    # query, which of them we've already seen.
    event_ids = set()
    if state:
        event_ids |= {e.event_id for e in state}
    if auth_chain:
        event_ids |= {e.event_id for e in auth_chain}

    seen_ids = set(
        (yield self.store.have_events(event_ids)).keys()
    )

    # NOTE(review): `state` is truthiness-tested but `auth_chain` gets an
    # explicit None check — confirm the asymmetry is intended.
    if state and auth_chain is not None:
        # If we have any state or auth_chain given to us by the replication
        # layer, then we should handle them (if we haven't before.)

        event_infos = []

        for e in itertools.chain(auth_chain, state):
            if e.event_id in seen_ids:
                continue
            # Persisted out-of-band, not as part of the room's DAG.
            e.internal_metadata.outlier = True
            auth_ids = [e_id for e_id, _ in e.auth_events]
            # Resolve this event's auth events from the supplied chain.
            auth = {
                (e.type, e.state_key): e for e in auth_chain
                if e.event_id in auth_ids
            }
            event_infos.append({
                "event": e,
                "auth_events": auth,
            })
            seen_ids.add(e.event_id)

        yield self._handle_new_events(
            origin,
            event_infos,
            outliers=True
        )

    try:
        _, event_stream_id, max_stream_id = yield self._handle_new_event(
            origin,
            event,
            state=state,
            backfilled=backfilled,
            current_state=current_state,
        )
    except AuthError as e:
        # Surface auth failures to the sending server as a federation
        # error referencing the offending event.
        raise FederationError(
            "ERROR",
            e.code,
            e.msg,
            affected=event.event_id,
        )

    # if we're receiving valid events from an origin,
    # it's probably a good idea to mark it as not in retry-state
    # for sending (although this is a bit of a leap)
    retry_timings = yield self.store.get_destination_retry_timings(origin)
    if retry_timings and retry_timings["retry_last_ts"]:
        self.store.set_destination_retry_timings(origin, 0, 0)

    room = yield self.store.get_room(event.room_id)

    if not room:
        # First event we've seen for this room; create a stub row.
        try:
            yield self.store.store_room(
                room_id=event.room_id,
                room_creator_user_id="",
                is_public=False,
            )
        except StoreError:
            logger.exception("Failed to store room.")

    if not backfilled:
        extra_users = []
        if event.type == EventTypes.Member:
            target_user_id = event.state_key
            target_user = UserID.from_string(target_user_id)
            extra_users.append(target_user)

        with PreserveLoggingContext():
            d = self.notifier.on_new_room_event(
                event, event_stream_id, max_stream_id,
                extra_users=extra_users
            )

        # Notification failures are logged, never propagated.
        def log_failure(f):
            logger.warn(
                "Failed to notify about %s: %s",
                event.event_id, f.value
            )

        d.addErrback(log_failure)

    if event.type == EventTypes.Member:
        if event.membership == Membership.JOIN:
            user = UserID.from_string(event.state_key)
            yield self.distributor.fire(
                "user_joined_room", user=user, room_id=event.room_id
            )
|
|
|
|
|
2015-07-03 12:52:57 -04:00
|
|
|
@defer.inlineCallbacks
def _filter_events_for_server(self, server_name, room_id, events):
    """Redact any of `events` that `server_name` is not allowed to see.

    For each event we look at the room's history-visibility and the
    membership of users on `server_name` at that event; events the server
    may not see are replaced by their pruned (redacted) form.
    """
    # Fetch, per event, just the state we need: the history-visibility
    # event and all membership events.
    event_to_state = yield self.store.get_state_for_events(
        room_id, frozenset(e.event_id for e in events),
        types=(
            (EventTypes.RoomHistoryVisibility, ""),
            (EventTypes.Member, None),
        )
    )

    def redact_disallowed(event_and_state):
        # Returns the event unchanged if visible, else its pruned form.
        event, state = event_and_state

        if not state:
            return event

        history = state.get((EventTypes.RoomHistoryVisibility, ''), None)
        if history:
            visibility = history.content.get("history_visibility", "shared")
            if visibility in ["invited", "joined"]:
                # We now loop through all state events looking for
                # membership states for the requesting server to determine
                # if the server is either in the room or has been invited
                # into the room.
                for ev in state.values():
                    if ev.type != EventTypes.Member:
                        continue
                    try:
                        domain = UserID.from_string(ev.state_key).domain
                    except:
                        continue

                    if domain != server_name:
                        continue

                    memtype = ev.membership
                    if memtype == Membership.JOIN:
                        return event
                    elif memtype == Membership.INVITE:
                        if visibility == "invited":
                            return event
                else:
                    # for/else: no qualifying membership returned above,
                    # so the requesting server may not see this event.
                    return prune_event(event)

        return event

    # NOTE: Python 2 `map` returns a list here.
    res = map(redact_disallowed, [
        (e, event_to_state[e.event_id])
        for e in events
    ])

    defer.returnValue(res)
|
|
|
|
|
2014-08-19 09:20:03 -04:00
|
|
|
@log_function
@defer.inlineCallbacks
def backfill(self, dest, room_id, limit, extremities=None):
    """ Trigger a backfill request to `dest` for the given `room_id`.

    Args:
        dest: the server to backfill from.
        room_id: the room to backfill.
        limit: maximum number of events to request.
        extremities: event ids to backfill from; defaults to the oldest
            events we have for the room. (Default changed from the mutable
            `[]` to None — behaviour is identical since both are falsy.)

    Returns:
        Deferred: resolves to the list of backfilled events.
    """
    if not extremities:
        extremities = yield self.store.get_oldest_events_in_room(room_id)

    events = yield self.replication_layer.backfill(
        dest,
        room_id,
        limit=limit,
        extremities=extremities,
    )

    event_map = {e.event_id: e for e in events}

    event_ids = set(e.event_id for e in events)

    # "Edges" are events whose prev_events point outside the batch; we
    # need the remote's state at each of them.
    edges = [
        ev.event_id
        for ev in events
        if set(e_id for e_id, _ in ev.prev_events) - event_ids
    ]

    logger.info(
        "backfill: Got %d events with %d edges",
        len(events), len(edges),
    )

    # For each edge get the current state.

    auth_events = {}
    state_events = {}
    events_to_state = {}
    for e_id in edges:
        state, auth = yield self.replication_layer.get_state_for_room(
            destination=dest,
            room_id=room_id,
            event_id=e_id
        )
        auth_events.update({a.event_id: a for a in auth})
        auth_events.update({s.event_id: s for s in state})
        state_events.update({s.event_id: s for s in state})
        events_to_state[e_id] = state

    seen_events = yield self.store.have_events(
        set(auth_events.keys()) | set(state_events.keys())
    )

    # Work out which auth events we're still missing and fetch them.
    all_events = events + state_events.values() + auth_events.values()
    required_auth = set(
        a_id for event in all_events for a_id, _ in event.auth_events
    )

    missing_auth = required_auth - set(auth_events)
    results = yield defer.gatherResults(
        [
            self.replication_layer.get_pdu(
                [dest],
                event_id,
                outlier=True,
                timeout=10000,
            )
            for event_id in missing_auth
        ],
        consumeErrors=True
    ).addErrback(unwrapFirstError)
    auth_events.update({a.event_id: a for a in results})

    # Persist auth events first, then edge events (with their state),
    # then the remaining backfilled events in depth order.
    ev_infos = []
    for a in auth_events.values():
        if a.event_id in seen_events:
            continue
        ev_infos.append({
            "event": a,
            "auth_events": {
                (auth_events[a_id].type, auth_events[a_id].state_key):
                auth_events[a_id]
                for a_id, _ in a.auth_events
            }
        })

    for e_id in events_to_state:
        ev_infos.append({
            "event": event_map[e_id],
            "state": events_to_state[e_id],
            "auth_events": {
                (auth_events[a_id].type, auth_events[a_id].state_key):
                auth_events[a_id]
                for a_id, _ in event_map[e_id].auth_events
            }
        })

    events.sort(key=lambda e: e.depth)

    for event in events:
        # BUGFIX: was `if event in events_to_state` — comparing the event
        # *object* against a dict keyed by event *ids*, which is never
        # true, so edge events were queued a second time without state.
        if event.event_id in events_to_state:
            continue

        ev_infos.append({
            "event": event,
        })

    yield self._handle_new_events(
        dest, ev_infos,
        backfilled=True,
    )

    defer.returnValue(events)
|
|
|
|
|
2015-05-11 13:01:31 -04:00
|
|
|
@defer.inlineCallbacks
def maybe_backfill(self, room_id, current_depth):
    """Checks the database to see if we should backfill before paginating,
    and if so do.

    Args:
        room_id: the room being paginated.
        current_depth: depth the pagination has reached.

    Returns:
        Deferred[bool]: True if a backfill produced events.
    """
    extremities = yield self.store.get_oldest_events_with_depth_in_room(
        room_id
    )

    if not extremities:
        logger.debug("Not backfilling as no extremeties found.")
        return

    # Check if we reached a point where we should start backfilling.
    sorted_extremeties_tuple = sorted(
        extremities.items(),
        key=lambda e: -int(e[1])
    )
    max_depth = sorted_extremeties_tuple[0][1]

    if current_depth > max_depth:
        logger.debug(
            "Not backfilling as we don't need to. %d < %d",
            max_depth, current_depth,
        )
        return

    # Now we need to decide which hosts to hit first.

    # First we try hosts that are already in the room
    # TODO: HEURISTIC ALERT.

    curr_state = yield self.state_handler.get_current_state(room_id)

    def get_domains_from_state(state):
        # Maps each joined user's domain to the minimum depth at which a
        # user from that domain is joined; returns (domain, depth) sorted
        # by depth ascending.
        joined_users = [
            (state_key, int(event.depth))
            for (e_type, state_key), event in state.items()
            if e_type == EventTypes.Member
            and event.membership == Membership.JOIN
        ]

        joined_domains = {}
        for u, d in joined_users:
            try:
                dom = UserID.from_string(u).domain
                old_d = joined_domains.get(dom)
                if old_d:
                    joined_domains[dom] = min(d, old_d)
                else:
                    joined_domains[dom] = d
            except:
                # Best effort: skip unparseable user ids.
                pass

        return sorted(joined_domains.items(), key=lambda d: d[1])

    curr_domains = get_domains_from_state(curr_state)

    likely_domains = [
        domain for domain, depth in curr_domains
        # BUGFIX: was `domain is not self.server_name` — identity
        # comparison on strings is unreliable; use inequality.
        if domain != self.server_name
    ]

    @defer.inlineCallbacks
    def try_backfill(domains):
        # TODO: Should we try multiple of these at a time?
        for dom in domains:
            try:
                events = yield self.backfill(
                    dom, room_id,
                    limit=100,
                    extremities=[e for e in extremities.keys()]
                )
            # BUGFIX: was `except SynapseError:` with no `as e`, so the
            # logging line below raised NameError on `e`.
            except SynapseError as e:
                logger.info(
                    "Failed to backfill from %s because %s",
                    dom, e,
                )
                continue
            except CodeMessageException as e:
                # 4xx responses are our fault; don't keep trying hosts.
                if 400 <= e.code < 500:
                    raise

                logger.info(
                    "Failed to backfill from %s because %s",
                    dom, e,
                )
                continue
            except NotRetryingDestination as e:
                logger.info(e.message)
                continue
            except Exception as e:
                logger.exception(
                    "Failed to backfill from %s because %s",
                    dom, e,
                )
                continue

            if events:
                defer.returnValue(True)
        defer.returnValue(False)

    success = yield try_backfill(likely_domains)
    if success:
        defer.returnValue(True)

    # Huh, well *those* domains didn't work out. Lets try some domains
    # from the time.

    tried_domains = set(likely_domains)
    tried_domains.add(self.server_name)

    event_ids = list(extremities.keys())

    # Resolve the state at each extremity so we can find domains that
    # were in the room back then.
    states = yield defer.gatherResults([
        self.state_handler.resolve_state_groups(room_id, [e])
        for e in event_ids
    ])
    states = dict(zip(event_ids, [s[1] for s in states]))

    for e_id, _ in sorted_extremeties_tuple:
        likely_domains = get_domains_from_state(states[e_id])

        success = yield try_backfill([
            dom for dom in likely_domains
            if dom not in tried_domains
        ])
        if success:
            defer.returnValue(True)

        tried_domains.update(likely_domains)

    defer.returnValue(False)
|
|
|
|
|
2014-11-07 08:41:00 -05:00
|
|
|
@defer.inlineCallbacks
def send_invite(self, target_host, event):
    """ Sends the invite to the remote server for signing.

    Invites must be signed by the invitee's server before distribution.
    """
    # Ask the invitee's server to countersign the invite event.
    signed_pdu = yield self.replication_layer.send_invite(
        destination=target_host,
        room_id=event.room_id,
        event_id=event.event_id,
        pdu=event,
    )

    defer.returnValue(signed_pdu)
|
2014-11-07 08:41:00 -05:00
|
|
|
|
2014-11-07 10:35:53 -05:00
|
|
|
@defer.inlineCallbacks
def on_event_auth(self, event_id):
    """Return the auth chain for `event_id`, signed by this server."""
    auth_chain = yield self.store.get_auth_chain([event_id])

    # Attach our own signature to every event before handing it out.
    for auth_event in auth_chain:
        signature = compute_event_signature(
            auth_event,
            self.hs.hostname,
            self.hs.config.signing_key[0]
        )
        auth_event.signatures.update(signature)

    defer.returnValue(list(auth_chain))
|
2014-11-07 10:35:53 -05:00
|
|
|
|
2014-08-20 09:42:36 -04:00
|
|
|
@log_function
@defer.inlineCallbacks
def do_invite_join(self, target_hosts, room_id, joinee, content, snapshot):
    """ Attempts to join the `joinee` to the room `room_id` via the
    server `target_host`.

    This first triggers a /make_join/ request that returns a partial
    event that we can fill out and sign. This is then sent to the
    remote server via /send_join/ which responds with the state at that
    event and the auth_chains.

    We suspend processing of any received events from this room until we
    have finished processing the join.
    """
    logger.debug("Joining %s to %s", joinee, room_id)

    yield self.store.clean_room_for_join(room_id)

    # `origin` is whichever of target_hosts answered /make_join/.
    origin, pdu = yield self.replication_layer.make_join(
        target_hosts,
        room_id,
        joinee
    )

    logger.debug("Got response to make_join: %s", pdu)

    event = pdu

    # We should assert some things.
    # FIXME: Do this in a nicer way
    assert(event.type == EventTypes.Member)
    assert(event.user_id == joinee)
    assert(event.state_key == joinee)
    assert(event.room_id == room_id)

    event.internal_metadata.outlier = False

    # Start queueing incoming PDUs for this room; they are replayed in
    # the finally block once the join completes.
    self.room_queues[room_id] = []

    builder = self.event_builder_factory.new(
        unfreeze(event.get_pdu_json())
    )

    # Event ids we processed as part of the join, so we can skip them
    # when draining the queue.
    handled_events = set()

    try:
        # Fill out and sign the partial event from /make_join/.
        builder.event_id = self.event_builder_factory.create_event_id()
        builder.origin = self.hs.hostname
        builder.content = content

        if not hasattr(event, "signatures"):
            builder.signatures = {}

        add_hashes_and_signatures(
            builder,
            self.hs.hostname,
            self.hs.config.signing_key[0],
        )

        new_event = builder.build()

        # Try the host we successfully got a response to /make_join/
        # request first.
        try:
            target_hosts.remove(origin)
            target_hosts.insert(0, origin)
        except ValueError:
            pass

        ret = yield self.replication_layer.send_join(
            target_hosts,
            new_event
        )

        origin = ret["origin"]
        state = ret["state"]
        auth_chain = ret["auth_chain"]
        auth_chain.sort(key=lambda e: e.depth)

        handled_events.update([s.event_id for s in state])
        handled_events.update([a.event_id for a in auth_chain])
        handled_events.add(new_event.event_id)

        logger.debug("do_invite_join auth_chain: %s", auth_chain)
        logger.debug("do_invite_join state: %s", state)

        logger.debug("do_invite_join event: %s", new_event)

        try:
            yield self.store.store_room(
                room_id=room_id,
                room_creator_user_id="",
                is_public=False
            )
        except:
            # FIXME
            pass

        # Persist the returned state and auth chain as outliers.
        ev_infos = []
        for e in itertools.chain(state, auth_chain):
            if e.event_id == event.event_id:
                continue

            e.internal_metadata.outlier = True
            auth_ids = [e_id for e_id, _ in e.auth_events]
            ev_infos.append({
                "event": e,
                "auth_events": {
                    (e.type, e.state_key): e for e in auth_chain
                    if e.event_id in auth_ids
                }
            })

        yield self._handle_new_events(origin, ev_infos, outliers=True)

        auth_ids = [e_id for e_id, _ in event.auth_events]
        auth_events = {
            (e.type, e.state_key): e for e in auth_chain
            if e.event_id in auth_ids
        }

        _, event_stream_id, max_stream_id = yield self._handle_new_event(
            origin,
            new_event,
            state=state,
            current_state=state,
            auth_events=auth_events,
        )

        with PreserveLoggingContext():
            d = self.notifier.on_new_room_event(
                new_event, event_stream_id, max_stream_id,
                extra_users=[joinee]
            )

        # Notification failures are logged, never propagated.
        def log_failure(f):
            logger.warn(
                "Failed to notify about %s: %s",
                new_event.event_id, f.value
            )

        d.addErrback(log_failure)

        logger.debug("Finished joining %s to %s", joinee, room_id)
    finally:
        # Drain the queue of PDUs that arrived mid-join, whether or not
        # the join succeeded.
        room_queue = self.room_queues[room_id]
        del self.room_queues[room_id]

        for p, origin in room_queue:
            if p.event_id in handled_events:
                continue

            try:
                self.on_receive_pdu(origin, p, backfilled=False)
            except:
                logger.exception("Couldn't handle pdu")

    defer.returnValue(True)
|
2014-08-21 09:38:22 -04:00
|
|
|
|
2014-10-16 11:56:51 -04:00
|
|
|
@defer.inlineCallbacks
@log_function
def on_make_join_request(self, room_id, user_id):
    """ We've received a /make_join/ request, so we create a partial
    join event for the room and return that. We do *not* persist or
    process it until the other server has signed it and sent it back.

    Args:
        room_id: the room being joined.
        user_id: the remote user attempting to join.

    Returns:
        Deferred: resolves to the partial (unsigned-by-them) join event.
    """
    builder = self.event_builder_factory.new({
        "type": EventTypes.Member,
        "content": {"membership": Membership.JOIN},
        "room_id": room_id,
        "sender": user_id,
        "state_key": user_id,
    })

    event, context = yield self._create_new_client_event(
        builder=builder,
    )

    # Raises if the join wouldn't be allowed by the room's current state.
    self.auth.check(event, auth_events=context.current_state)

    defer.returnValue(event)
|
2014-10-16 11:56:51 -04:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
@log_function
def on_send_join_request(self, origin, pdu):
    """ We have received a join event for a room. Fully process it and
    respond with the current state and auth chains.

    Args:
        origin (str): server that sent us the signed join event.
        pdu: the signed m.room.member join event.

    Returns:
        Deferred: dict with the room's current ``state`` events and the
        ``auth_chain`` for the event plus that state.
    """
    event = pdu

    logger.debug(
        "on_send_join_request: Got event: %s, signatures: %s",
        event.event_id,
        event.signatures,
    )

    # The event came back signed, so it is no longer an outlier: it is
    # processed and persisted as part of the room DAG.
    event.internal_metadata.outlier = False

    context, event_stream_id, max_stream_id = yield self._handle_new_event(
        origin, event
    )

    logger.debug(
        "on_send_join_request: After _handle_new_event: %s, sigs: %s",
        event.event_id,
        event.signatures,
    )

    extra_users = []
    if event.type == EventTypes.Member:
        target_user_id = event.state_key
        target_user = UserID.from_string(target_user_id)
        extra_users.append(target_user)

    with PreserveLoggingContext():
        d = self.notifier.on_new_room_event(
            event, event_stream_id, max_stream_id, extra_users=extra_users
        )

    def log_failure(f):
        logger.warn(
            "Failed to notify about %s: %s",
            event.event_id, f.value
        )

    # Notification is best-effort; don't let a failure propagate.
    d.addErrback(log_failure)

    if event.type == EventTypes.Member:
        if event.content["membership"] == Membership.JOIN:
            user = UserID.from_string(event.state_key)
            yield self.distributor.fire(
                "user_joined_room", user=user, room_id=event.room_id
            )

    new_pdu = event

    # Work out which remote servers should be told about the join: every
    # server with at least one joined user, except the sender itself.
    destinations = set()

    for k, s in context.current_state.items():
        try:
            if k[0] == EventTypes.Member:
                if s.content["membership"] == Membership.JOIN:
                    destinations.add(
                        UserID.from_string(s.state_key).domain
                    )
        except Exception:
            # FIX: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit. Malformed state events are
            # skipped on a best-effort basis.
            logger.warn(
                "Failed to get destination from event %s", s.event_id
            )

    destinations.discard(origin)

    logger.debug(
        "on_send_join_request: Sending event: %s, signatures: %s",
        event.event_id,
        event.signatures,
    )

    self.replication_layer.send_pdu(new_pdu, destinations)

    state_ids = [e.event_id for e in context.current_state.values()]
    auth_chain = yield self.store.get_auth_chain(set(
        [event.event_id] + state_ids
    ))

    defer.returnValue({
        "state": context.current_state.values(),
        "auth_chain": auth_chain,
    })
|
2014-10-16 11:56:51 -04:00
|
|
|
|
2014-11-07 08:41:00 -05:00
|
|
|
@defer.inlineCallbacks
def on_invite_request(self, origin, pdu):
    """Process an invite event received over federation.

    Signs the event with our key, persists it as an outlier, notifies
    the invited local user, and responds with the now-signed event.
    """
    event = pdu

    # Invites arrive without the surrounding room DAG, so we store the
    # event as an outlier.
    event.internal_metadata.outlier = True

    our_signature = compute_event_signature(
        event,
        self.hs.hostname,
        self.hs.config.signing_key[0]
    )
    event.signatures.update(our_signature)

    context = yield self.state_handler.compute_event_context(event)

    event_stream_id, max_stream_id = yield self.store.persist_event(
        event,
        context=context,
        backfilled=False,
    )

    invited_user = UserID.from_string(event.state_key)
    with PreserveLoggingContext():
        notify_d = self.notifier.on_new_room_event(
            event, event_stream_id, max_stream_id,
            extra_users=[invited_user],
        )

    def log_failure(f):
        logger.warn(
            "Failed to notify about %s: %s",
            event.event_id, f.value
        )

    # Notification is best-effort.
    notify_d.addErrback(log_failure)

    defer.returnValue(event)
|
2014-11-07 08:41:00 -05:00
|
|
|
|
2014-10-17 10:04:17 -04:00
|
|
|
@defer.inlineCallbacks
def get_state_for_pdu(self, origin, room_id, event_id, do_auth=True):
    """Return the room state visible at ``event_id``, signed by us.

    Args:
        origin (str): server asking for the state.
        room_id (str): room the event belongs to.
        event_id (str): event whose state snapshot is wanted.
        do_auth (bool): when True, require ``origin`` to be in the room.

    Returns:
        Deferred: list of state events (empty if we have no state group
        for the event).

    Raises:
        AuthError: if ``do_auth`` is set and ``origin`` is not in the
            room.
    """
    yield run_on_reactor()

    if do_auth:
        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

    state_groups = yield self.store.get_state_groups(
        [event_id]
    )

    if state_groups:
        # We asked for a single event, so take the (only) entry.
        _, state = state_groups.items().pop()
        # Index by (type, state_key) so entries can be substituted.
        results = {
            (e.type, e.state_key): e for e in state
        }

        event = yield self.store.get_event(event_id)
        if event and event.is_state():
            # Get previous state
            # The snapshot should be the state *before* the event
            # itself, so replace the event's own entry with the state it
            # superseded, or drop it if it replaced nothing.
            if "replaces_state" in event.unsigned:
                prev_id = event.unsigned["replaces_state"]
                if prev_id != event.event_id:
                    prev_event = yield self.store.get_event(prev_id)
                    results[(event.type, event.state_key)] = prev_event
            else:
                del results[(event.type, event.state_key)]

        res = results.values()
        for event in res:
            # Re-sign everything we hand out so the response carries
            # our signature.
            event.signatures.update(
                compute_event_signature(
                    event,
                    self.hs.hostname,
                    self.hs.config.signing_key[0]
                )
            )

        defer.returnValue(res)
    else:
        defer.returnValue([])
|
|
|
|
|
2014-10-31 05:59:02 -04:00
|
|
|
@defer.inlineCallbacks
@log_function
def on_backfill_request(self, origin, room_id, pdu_list, limit):
    """Serve a federation backfill request.

    Checks that the requesting server is in the room, fetches up to
    ``limit`` events behind ``pdu_list``, strips out anything the
    requester should not see, and returns the rest.
    """
    allowed = yield self.auth.check_host_in_room(room_id, origin)
    if not allowed:
        raise AuthError(403, "Host not in room.")

    raw_events = yield self.store.get_backfill_events(
        room_id, pdu_list, limit,
    )

    # Apply history-visibility filtering for the requesting server.
    visible_events = yield self._filter_events_for_server(
        origin, room_id, raw_events,
    )

    defer.returnValue(visible_events)
|
2014-10-31 05:59:02 -04:00
|
|
|
|
2014-10-31 06:47:34 -04:00
|
|
|
@defer.inlineCallbacks
@log_function
def get_persisted_pdu(self, origin, event_id, do_auth=True):
    """ Get a PDU from the database with given origin and id.

    Returns:
        Deferred: Results in a `Pdu`, or None if we have no copy of the
        event.
    """
    event = yield self.store.get_event(
        event_id,
        allow_none=True,
        allow_rejected=True,
    )

    if not event:
        defer.returnValue(None)

    # FIXME: This is a temporary work around where we occasionally
    # return events slightly differently than when they were
    # originally signed
    event.signatures.update(
        compute_event_signature(
            event,
            self.hs.hostname,
            self.hs.config.signing_key[0]
        )
    )

    if do_auth:
        in_room = yield self.auth.check_host_in_room(
            event.room_id,
            origin
        )
        if not in_room:
            raise AuthError(403, "Host not in room.")

    defer.returnValue(event)
|
|
|
|
|
|
|
|
@log_function
def get_min_depth_for_context(self, context):
    """Return a deferred resolving to the minimum depth the store has
    recorded for the given room/context.
    """
    min_depth_d = self.store.get_min_depth(context)
    return min_depth_d
|
|
|
|
|
2014-08-21 09:38:22 -04:00
|
|
|
@log_function
def _on_user_joined(self, user, room_id):
    """Fire (and drain) any deferreds waiting for ``user`` to join
    ``room_id``.
    """
    key = (user.to_string(), room_id)
    pending = self.waiting_for_join_list.get(key, [])
    # Pop as we go so the shared list in waiting_for_join_list empties.
    while pending:
        pending.pop().callback(None)
|
2014-11-25 06:31:18 -05:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
@log_function
def _handle_new_event(self, origin, event, state=None, backfilled=False,
                      current_state=None, auth_events=None):
    """Prepare (state/auth) and persist a single received event.

    Returns:
        Deferred: resolves to ``(context, event_stream_id,
        max_stream_id)``.
    """
    # Record outlier-ness up front: it decides below whether the event
    # may advance the room state.
    is_outlier = event.internal_metadata.is_outlier()

    event_context = yield self._prep_event(
        origin,
        event,
        state=state,
        backfilled=backfilled,
        current_state=current_state,
        auth_events=auth_events,
    )

    # Only non-outlier, non-backfilled events count as new room state.
    stream_id, max_id = yield self.store.persist_event(
        event,
        context=event_context,
        backfilled=backfilled,
        is_new_state=(not is_outlier and not backfilled),
        current_state=current_state,
    )

    defer.returnValue((event_context, stream_id, max_id))
|
|
|
|
|
|
|
|
@defer.inlineCallbacks
def _handle_new_events(self, origin, event_infos, backfilled=False,
                       outliers=False):
    """Prepare and persist a batch of received events in one go.

    Each entry of ``event_infos`` is a dict with key ``event`` and,
    optionally, ``state`` and ``auth_events``.
    """
    # FIX: use consumeErrors + unwrapFirstError (as elsewhere in this
    # file) so a failing _prep_event surfaces as the original exception
    # instead of a FirstError, and the sibling deferreds' failures don't
    # end up as unhandled errors.
    contexts = yield defer.gatherResults(
        [
            self._prep_event(
                origin,
                ev_info["event"],
                state=ev_info.get("state"),
                backfilled=backfilled,
                auth_events=ev_info.get("auth_events"),
            )
            for ev_info in event_infos
        ],
        consumeErrors=True,
    ).addErrback(unwrapFirstError)

    yield self.store.persist_events(
        [
            (ev_info["event"], context)
            for ev_info, context in itertools.izip(event_infos, contexts)
        ],
        backfilled=backfilled,
        # Outliers and backfilled events never advance the room state.
        is_new_state=(not outliers and not backfilled),
    )
|
|
|
|
|
2015-06-25 12:18:19 -04:00
|
|
|
@defer.inlineCallbacks
def _prep_event(self, origin, event, state=None, backfilled=False,
                current_state=None, auth_events=None):
    """Compute the event context for ``event`` and run auth checks.

    An event that fails auth is not dropped; instead the returned
    context is marked rejected with RejectedReason.AUTH_ERROR.
    """
    is_outlier = event.internal_metadata.is_outlier()

    context = yield self.state_handler.compute_event_context(
        event, old_state=state, outlier=is_outlier,
    )

    if not auth_events:
        auth_events = context.current_state

    # This is a hack to fix some old rooms where the initial join event
    # didn't reference the create event in its auth events: when this
    # looks like the room's very first join, patch the create event in.
    is_early_member = (
        event.type == EventTypes.Member and not event.auth_events
    )
    if is_early_member and len(event.prev_events) == 1 and event.depth < 5:
        prev_ev = yield self.store.get_event(
            event.prev_events[0][0],
            allow_none=True,
        )
        if prev_ev and prev_ev.type == EventTypes.Create:
            auth_events[(prev_ev.type, prev_ev.state_key)] = prev_ev

    try:
        yield self.do_auth(
            origin, event, context, auth_events=auth_events
        )
    except AuthError as err:
        logger.warn(
            "Rejecting %s because %s",
            event.event_id, err.msg
        )
        context.rejected = RejectedReason.AUTH_ERROR

    defer.returnValue(context)
|
2015-01-29 11:50:23 -05:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
def on_query_auth(self, origin, event_id, remote_auth_chain, rejects,
                  missing):
    """Handle a federation auth query for ``event_id``.

    Processes the remote server's auth chain, diffs it against our own,
    signs the events we hand back, and returns the auth difference.
    """
    # Just go through and process each event in `remote_auth_chain`. We
    # don't want to fall into the trap of `missing` being wrong.
    for remote_ev in remote_auth_chain:
        try:
            yield self._handle_new_event(origin, remote_ev)
        except AuthError:
            # Events that fail auth are simply skipped here.
            pass

    # Now get the current auth_chain for the event.
    local_auth_chain = yield self.store.get_auth_chain([event_id])

    # TODO: Check if we would now reject event_id. If so we need to tell
    # everyone.

    ret = yield self.construct_auth_difference(
        local_auth_chain, remote_auth_chain
    )

    # Sign everything we are about to send back.
    for chain_ev in ret["auth_chain"]:
        signature = compute_event_signature(
            chain_ev,
            self.hs.hostname,
            self.hs.config.signing_key[0]
        )
        chain_ev.signatures.update(signature)

    logger.debug("on_query_auth returning: %s", ret)

    defer.returnValue(ret)
|
2015-01-28 11:16:53 -05:00
|
|
|
|
2015-02-23 08:58:02 -05:00
|
|
|
@defer.inlineCallbacks
def on_get_missing_events(self, origin, room_id, earliest_events,
                          latest_events, limit, min_depth):
    """Return events between ``earliest_events`` and ``latest_events``
    for a server that is missing part of the room DAG.

    Raises:
        AuthError: if ``origin`` is not in the room.
    """
    in_room = yield self.auth.check_host_in_room(
        room_id,
        origin
    )
    if not in_room:
        raise AuthError(403, "Host not in room.")

    # Clamp the client-supplied bounds to something sane.
    capped_limit = min(limit, 20)
    floored_depth = max(min_depth, 0)

    missing_events = yield self.store.get_missing_events(
        room_id=room_id,
        earliest_events=earliest_events,
        latest_events=latest_events,
        limit=capped_limit,
        min_depth=floored_depth,
    )

    defer.returnValue(missing_events)
|
|
|
|
|
2015-01-28 11:16:53 -05:00
|
|
|
@defer.inlineCallbacks
@log_function
def do_auth(self, origin, event, context, auth_events):
    """Check that ``event`` passes auth against ``auth_events``.

    Fetches any auth events we are missing from ``origin``, and where
    our view of the auth events differs from the event's, runs auth
    conflict resolution. Mutates ``auth_events`` and ``context`` as
    resolution proceeds.

    Raises:
        AuthError: if the event still fails auth after resolution.
    """
    # Check if we have all the auth events.
    current_state = set(e.event_id for e in auth_events.values())
    event_auth_events = set(e_id for e_id, _ in event.auth_events)

    if event_auth_events - current_state:
        have_events = yield self.store.have_events(
            event_auth_events - current_state
        )
    else:
        have_events = {}

    have_events.update({
        e.event_id: ""
        for e in auth_events.values()
    })

    seen_events = set(have_events.keys())

    missing_auth = event_auth_events - seen_events - current_state

    if missing_auth:
        logger.info("Missing auth: %s", missing_auth)
        # If we don't have all the auth events, we need to get them.
        try:
            remote_auth_chain = yield self.replication_layer.get_event_auth(
                origin, event.room_id, event.event_id
            )

            seen_remotes = yield self.store.have_events(
                [e.event_id for e in remote_auth_chain]
            )

            for e in remote_auth_chain:
                if e.event_id in seen_remotes.keys():
                    continue

                if e.event_id == event.event_id:
                    continue

                try:
                    auth_ids = [e_id for e_id, _ in e.auth_events]
                    auth = {
                        (e.type, e.state_key): e for e in remote_auth_chain
                        if e.event_id in auth_ids
                    }
                    e.internal_metadata.outlier = True

                    logger.debug(
                        "do_auth %s missing_auth: %s",
                        event.event_id, e.event_id
                    )
                    yield self._handle_new_event(
                        origin, e, auth_events=auth
                    )

                    if e.event_id in event_auth_events:
                        auth_events[(e.type, e.state_key)] = e
                except AuthError:
                    pass

            have_events = yield self.store.have_events(
                [e_id for e_id, _ in event.auth_events]
            )
            seen_events = set(have_events.keys())
        except Exception:
            # FIXME: best effort. (FIX: was a bare `except:`; narrowed
            # so KeyboardInterrupt/SystemExit are not swallowed.)
            logger.exception("Failed to get auth chain")

    # FIXME: Assumes we have and stored all the state for all the
    # prev_events
    current_state = set(e.event_id for e in auth_events.values())
    different_auth = event_auth_events - current_state

    if different_auth and not event.internal_metadata.is_outlier():
        # Do auth conflict res.
        logger.info("Different auth: %s", different_auth)

        different_events = yield defer.gatherResults(
            [
                self.store.get_event(
                    d,
                    allow_none=True,
                    allow_rejected=False,
                )
                for d in different_auth
                if d in have_events and not have_events[d]
            ],
            consumeErrors=True
        ).addErrback(unwrapFirstError)

        if different_events:
            local_view = dict(auth_events)
            remote_view = dict(auth_events)
            remote_view.update({
                (d.type, d.state_key): d for d in different_events
            })

            new_state, prev_state = self.state_handler.resolve_events(
                [local_view.values(), remote_view.values()],
                event
            )

            auth_events.update(new_state)

            current_state = set(e.event_id for e in auth_events.values())
            different_auth = event_auth_events - current_state

            context.current_state.update(auth_events)
            context.state_group = None

    if different_auth and not event.internal_metadata.is_outlier():
        logger.info("Different auth after resolution: %s", different_auth)

        # Only do auth resolution if we have something new to say.
        # We can't prove an auth failure.
        do_resolution = False

        # NOTE(review): NOT_ANCESTOR appears twice here; the second
        # entry was presumably meant to be a different rejection reason
        # (e.g. REPLACED) -- confirm before changing.
        provable = [
            RejectedReason.NOT_ANCESTOR, RejectedReason.NOT_ANCESTOR,
        ]

        for e_id in different_auth:
            if e_id in have_events:
                if have_events[e_id] in provable:
                    do_resolution = True
                    break

        if do_resolution:
            # 1. Get what we think is the auth chain.
            auth_ids = self.auth.compute_auth_events(
                event, context.current_state
            )
            local_auth_chain = yield self.store.get_auth_chain(auth_ids)

            try:
                # 2. Get remote difference.
                result = yield self.replication_layer.query_auth(
                    origin,
                    event.room_id,
                    event.event_id,
                    local_auth_chain,
                )

                seen_remotes = yield self.store.have_events(
                    [e.event_id for e in result["auth_chain"]]
                )

                # 3. Process any remote auth chain events we haven't seen.
                for ev in result["auth_chain"]:
                    if ev.event_id in seen_remotes.keys():
                        continue

                    if ev.event_id == event.event_id:
                        continue

                    try:
                        auth_ids = [e_id for e_id, _ in ev.auth_events]
                        auth = {
                            (e.type, e.state_key): e
                            for e in result["auth_chain"]
                            if e.event_id in auth_ids
                        }
                        ev.internal_metadata.outlier = True

                        # FIX: this previously logged `e.event_id` --
                        # the variable leaked from an earlier list
                        # comprehension -- instead of the loop variable
                        # `ev`, so the wrong event id was logged.
                        logger.debug(
                            "do_auth %s different_auth: %s",
                            event.event_id, ev.event_id
                        )

                        yield self._handle_new_event(
                            origin, ev, auth_events=auth
                        )

                        if ev.event_id in event_auth_events:
                            auth_events[(ev.type, ev.state_key)] = ev
                    except AuthError:
                        pass

            except Exception:
                # FIXME: best effort. (FIX: was a bare `except:`.)
                logger.exception("Failed to query auth chain")

            # 4. Look at rejects and their proofs.
            # TODO.

            context.current_state.update(auth_events)
            context.state_group = None

    # Final check: raise AuthError if the event still fails auth.
    try:
        self.auth.check(event, auth_events=auth_events)
    except AuthError:
        raise
|
|
|
|
|
|
|
|
@defer.inlineCallbacks
def construct_auth_difference(self, local_auth, remote_auth):
    """ Given a local and remote auth chain, find the differences. This
    assumes that we have already processed all events in remote_auth

    Params:
        local_auth (list)
        remote_auth (list)

    Returns:
        dict: with keys "auth_chain" (our chain), "rejects"
        (event_id -> reason/proof for rejected remote events) and
        "missing" (event_ids the remote is missing).
    """
    logger.debug("construct_auth_difference Start!")

    # TODO: Make sure we are OK with local_auth or remote_auth having more
    # auth events in them than strictly necessary.

    def sort_fun(ev):
        return ev.depth, ev.event_id

    logger.debug("construct_auth_difference after sort_fun!")

    # We find the differences by starting at the "bottom" of each list
    # and iterating up on both lists. The lists are ordered by depth and
    # then event_id, we iterate up both lists until we find the event ids
    # don't match. Then we look at depth/event_id to see which side is
    # missing that event, and iterate only up that list. Repeat.

    remote_list = list(remote_auth)
    remote_list.sort(key=sort_fun)

    local_list = list(local_auth)
    local_list.sort(key=sort_fun)

    local_iter = iter(local_list)
    remote_iter = iter(remote_list)

    logger.debug("construct_auth_difference before get_next!")

    def get_next(it, opt=None):
        # FIX: was a bare `except:`; only exhaustion should yield the
        # default.
        try:
            return it.next()
        except StopIteration:
            return opt

    current_local = get_next(local_iter)
    current_remote = get_next(remote_iter)

    logger.debug("construct_auth_difference before while")

    missing_remotes = []
    missing_locals = []
    while current_local or current_remote:
        if current_remote is None:
            missing_locals.append(current_local)
            current_local = get_next(local_iter)
            continue

        if current_local is None:
            missing_remotes.append(current_remote)
            current_remote = get_next(remote_iter)
            continue

        if current_local.event_id == current_remote.event_id:
            current_local = get_next(local_iter)
            current_remote = get_next(remote_iter)
            continue

        if current_local.depth < current_remote.depth:
            missing_locals.append(current_local)
            current_local = get_next(local_iter)
            continue

        if current_local.depth > current_remote.depth:
            missing_remotes.append(current_remote)
            current_remote = get_next(remote_iter)
            continue

        # They have the same depth, so we fall back to the event_id order
        if current_local.event_id < current_remote.event_id:
            missing_locals.append(current_local)
            current_local = get_next(local_iter)
            # FIX: `continue` was missing here, so after advancing
            # current_local we fell through to the comparison below --
            # crashing with AttributeError when the local iterator was
            # exhausted (current_local is None), and otherwise comparing
            # the *new* current_local against current_remote.
            continue

        if current_local.event_id > current_remote.event_id:
            missing_remotes.append(current_remote)
            current_remote = get_next(remote_iter)
            continue

    logger.debug("construct_auth_difference after while")

    # missing locals should be sent to the server
    # We should find why we are missing remotes, as they will have been
    # rejected.

    # Remove events from missing_remotes if they are referencing a missing
    # remote. We only care about the "root" rejected ones.
    missing_remote_ids = [e.event_id for e in missing_remotes]
    base_remote_rejected = list(missing_remotes)
    for e in missing_remotes:
        for e_id, _ in e.auth_events:
            if e_id in missing_remote_ids:
                try:
                    base_remote_rejected.remove(e)
                except ValueError:
                    pass

    reason_map = {}

    for e in base_remote_rejected:
        reason = yield self.store.get_rejection_reason(e.event_id)
        if reason is None:
            # TODO: e is not in the current state, so we should
            # construct some proof of that.
            continue

        reason_map[e.event_id] = reason

        if reason == RejectedReason.AUTH_ERROR:
            pass
        elif reason == RejectedReason.REPLACED:
            # TODO: Get proof
            pass
        elif reason == RejectedReason.NOT_ANCESTOR:
            # TODO: Get proof.
            pass

    logger.debug("construct_auth_difference returning")

    defer.returnValue({
        "auth_chain": local_auth,
        "rejects": {
            e.event_id: {
                "reason": reason_map[e.event_id],
                "proof": None,
            }
            for e in base_remote_rejected
        },
        "missing": [e.event_id for e in missing_locals],
    })
|
2015-05-20 06:59:02 -04:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
def _handle_auth_events(self, origin, auth_events):
    """Process a batch of auth-chain events in dependency order.

    For each event a deferred is created that fires once the event has
    been handled; an event whose auth_events reference other events in
    the batch waits on those deferreds first, so parents are processed
    before their dependants. Failures are logged and swallowed so one
    bad event does not block the rest of the batch.
    """
    # event_id -> Deferred that fires when that event has been handled.
    auth_ids_to_deferred = {}

    def process_auth_ev(ev):
        auth_ids = [e_id for e_id, _ in ev.auth_events]

        # Deferreds for events in this batch that `ev` depends on.
        # Relies on callers ordering auth_events so that dependencies
        # appear before dependants -- presumably topological order;
        # verify against callers.
        prev_ds = [
            auth_ids_to_deferred[i]
            for i in auth_ids
            if i in auth_ids_to_deferred
        ]

        d = defer.Deferred()

        auth_ids_to_deferred[ev.event_id] = d

        @defer.inlineCallbacks
        def f(*_):
            # Auth events are handled as outliers: they are not part of
            # the room's forward DAG.
            ev.internal_metadata.outlier = True

            try:
                # Restrict the auth map to the events `ev` actually
                # lists in its auth_events.
                auth = {
                    (e.type, e.state_key): e for e in auth_events
                    if e.event_id in auth_ids
                }

                yield self._handle_new_event(
                    origin, ev, auth_events=auth
                )
            except:
                # Best effort: log and carry on with the other events.
                logger.exception(
                    "Failed to handle auth event %s",
                    ev.event_id,
                )

            # Fire unconditionally so dependants are never blocked.
            d.callback(None)

        if prev_ds:
            dx = defer.DeferredList(prev_ds)
            dx.addBoth(f)
        else:
            f()

    for e in auth_events:
        process_auth_ev(e)

    # Wait until every event in the batch has been handled.
    yield defer.DeferredList(auth_ids_to_deferred.values())
|