Merge branch 'develop' into rav/drop_re_signing_hacks

Richard van der Hoff 2018-07-04 07:13:38 +01:00 committed by GitHub
commit a4ab491371
152 changed files with 2187 additions and 641 deletions

synapse/handlers/federation.py

@@ -39,11 +39,12 @@ from synapse.events.validator import EventValidator
from synapse.util import unwrapFirstError, logcontext
from synapse.util.metrics import measure_func
from synapse.util.logutils import log_function
from synapse.util.async import run_on_reactor, Linearizer
from synapse.util.async import Linearizer
from synapse.util.frozenutils import unfreeze
from synapse.crypto.event_signing import (
compute_event_signature, add_hashes_and_signatures,
)
from synapse.state import resolve_events_with_factory
from synapse.types import UserID, get_domain_from_id
from synapse.events.utils import prune_event
@@ -89,7 +90,9 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
@log_function
def on_receive_pdu(self, origin, pdu, get_missing=True):
def on_receive_pdu(
self, origin, pdu, get_missing=True, sent_to_us_directly=False,
):
""" Process a PDU received via a federation /send/ transaction, or
via backfill of missing prev_events
@@ -103,8 +106,10 @@ class FederationHandler(BaseHandler):
"""
# We reprocess pdus when we have seen them only as outliers
existing = yield self.get_persisted_pdu(
origin, pdu.event_id, do_auth=False
existing = yield self.store.get_event(
pdu.event_id,
allow_none=True,
allow_rejected=True,
)
# FIXME: Currently we fetch an event again when we already have it
@@ -161,14 +166,11 @@ class FederationHandler(BaseHandler):
"Ignoring PDU %s for room %s from %s as we've left the room!",
pdu.event_id, pdu.room_id, origin,
)
return
defer.returnValue(None)
state = None
auth_chain = []
fetch_state = False
# Get missing pdus if necessary.
if not pdu.internal_metadata.is_outlier():
# We only backfill backwards to the min depth.
@@ -223,26 +225,60 @@ class FederationHandler(BaseHandler):
list(prevs - seen)[:5],
)
if prevs - seen:
logger.info(
"Still missing %d events for room %r: %r...",
len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
if sent_to_us_directly and prevs - seen:
# If they have sent it to us directly, and the server
# isn't telling us about the prev_events that this
# event references, we explode
raise FederationError(
"ERROR",
403,
(
"Your server isn't divulging details about prev_events "
"referenced in this event."
),
affected=pdu.event_id,
)
fetch_state = True
elif prevs - seen:
# Calculate the state of the previous events, and
# de-conflict them to find the current state.
state_groups = []
auth_chains = set()
try:
# Get the state of the events we know about
ours = yield self.store.get_state_groups(pdu.room_id, list(seen))
state_groups.append(ours)
if fetch_state:
# We need to get the state at this event, since we haven't
# processed all the prev events.
logger.debug(
"_handle_new_pdu getting state for %s",
pdu.room_id
)
try:
state, auth_chain = yield self.replication_layer.get_state_for_room(
origin, pdu.room_id, pdu.event_id,
)
except Exception:
logger.exception("Failed to get state for event: %s", pdu.event_id)
# Ask the remote server for the states we don't
# know about
for p in prevs - seen:
state, got_auth_chain = (
yield self.replication_layer.get_state_for_room(
origin, pdu.room_id, p
)
)
auth_chains.update(got_auth_chain)
state_group = {(x.type, x.state_key): x.event_id for x in state}
state_groups.append(state_group)
# Resolve any conflicting state
def fetch(ev_ids):
return self.store.get_events(
ev_ids, get_prev_content=False, check_redacted=False
)
state_map = yield resolve_events_with_factory(
state_groups, {pdu.event_id: pdu}, fetch
)
state = (yield self.store.get_events(state_map.values())).values()
auth_chain = list(auth_chains)
except Exception:
raise FederationError(
"ERROR",
403,
"We can't get valid state history.",
affected=pdu.event_id,
)
yield self._process_received_pdu(
origin,
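The hunk above builds one state map per unresolved prev_event, each keyed by (event_type, state_key), and hands the whole collection to resolve_events_with_factory to pick a single event per key. As a rough illustration of the data shape only (not Synapse's actual resolution algorithm, which lives in synapse.state and also weighs auth chains), a naive de-confliction over such maps could look like the sketch below; naive_resolve and the example event IDs are made up for illustration.

# Illustrative sketch only: shows the {(type, state_key): event_id} maps
# that the hunk above feeds into resolve_events_with_factory.
def naive_resolve(state_maps):
    """Split several state maps into agreed and conflicting entries."""
    unconflicted = {}
    conflicts = {}
    for state_map in state_maps:
        for key, event_id in state_map.items():
            if key in conflicts:
                conflicts[key].add(event_id)
            elif key in unconflicted and unconflicted[key] != event_id:
                # the maps disagree on this key; a real resolver arbitrates
                conflicts[key] = {unconflicted.pop(key), event_id}
            else:
                unconflicted[key] = event_id
    return unconflicted, conflicts

state_maps = [
    {("m.room.member", "@alice:a"): "$join", ("m.room.name", ""): "$name1"},
    {("m.room.member", "@alice:a"): "$join", ("m.room.name", ""): "$name2"},
]
print(naive_resolve(state_maps))
# -> the member entry is agreed; the room name is left for resolution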
@@ -320,11 +356,17 @@ class FederationHandler(BaseHandler):
for e in missing_events:
logger.info("Handling found event %s", e.event_id)
yield self.on_receive_pdu(
origin,
e,
get_missing=False
)
try:
yield self.on_receive_pdu(
origin,
e,
get_missing=False
)
except FederationError as err:
if err.code == 403:
logger.warn("Event %s failed history check.", e.event_id)
else:
raise
@log_function
@defer.inlineCallbacks
@@ -458,6 +500,47 @@ class FederationHandler(BaseHandler):
@measure_func("_filter_events_for_server")
@defer.inlineCallbacks
def _filter_events_for_server(self, server_name, room_id, events):
"""Filter the given events for the given server, redacting those the
server can't see.
Assumes the server is currently in the room.
Returns
list[FrozenEvent]
"""
# First let's check to see if all the events have a history visibility
# of "shared" or "world_readable". If that's the case then we don't
# need to check membership (as we know the server is in the room).
event_to_state_ids = yield self.store.get_state_ids_for_events(
frozenset(e.event_id for e in events),
types=(
(EventTypes.RoomHistoryVisibility, ""),
)
)
visibility_ids = set()
for sids in event_to_state_ids.itervalues():
hist = sids.get((EventTypes.RoomHistoryVisibility, ""))
if hist:
visibility_ids.add(hist)
# If we failed to find any history visibility events then the default
# is "shared" visibility.
if not visibility_ids:
defer.returnValue(events)
event_map = yield self.store.get_events(visibility_ids)
all_open = all(
e.content.get("history_visibility") in (None, "shared", "world_readable")
for e in event_map.itervalues()
)
if all_open:
defer.returnValue(events)
# Ok, so we're dealing with events that have non-trivial visibility
# rules, so we need to also get the memberships of the room.
event_to_state_ids = yield self.store.get_state_ids_for_events(
frozenset(e.event_id for e in events),
types=(
@@ -493,7 +576,20 @@ class FederationHandler(BaseHandler):
for e_id, key_to_eid in event_to_state_ids.iteritems()
}
erased_senders = yield self.store.are_users_erased(
(e.sender for e in events),
)
def redact_disallowed(event, state):
# if the sender has been erased (GDPR right to erasure), always
# return a redacted copy of the event.
if erased_senders[event.sender]:
logger.info(
"Sender of %s has been erased, redacting",
event.event_id,
)
return prune_event(event)
if not state:
return event
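The redact_disallowed closure above starts the per-event decision: events from erased senders are always replaced with a pruned copy, and events with no relevant state pass straight through. A simplified, self-contained sketch of the overall filtering pattern follows; filter_for_server, can_server_see and the callable parameters are illustrative stand-ins, since the real _filter_events_for_server also consults m.room.history_visibility and the requesting server's membership events.

def filter_for_server(events, erased_senders, can_server_see, prune_event):
    """Return events with disallowed ones swapped for redacted copies.

    erased_senders: dict of user_id -> bool (erased under GDPR).
    can_server_see: callable(event) -> bool, the visibility decision.
    prune_event:    callable(event) -> redacted copy of the event.
    """
    result = []
    for event in events:
        if erased_senders.get(event.sender) or not can_server_see(event):
            result.append(prune_event(event))
        else:
            result.append(event)
    return result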
@@ -1371,8 +1467,6 @@ class FederationHandler(BaseHandler):
def get_state_for_pdu(self, room_id, event_id):
"""Returns the state at the event. i.e. not including said event.
"""
yield run_on_reactor()
state_groups = yield self.store.get_state_groups(
room_id, [event_id]
)
@@ -1403,8 +1497,6 @@ class FederationHandler(BaseHandler):
def get_state_ids_for_pdu(self, room_id, event_id):
"""Returns the state at the event. i.e. not including said event.
"""
yield run_on_reactor()
state_groups = yield self.store.get_state_groups_ids(
room_id, [event_id]
)
@@ -1446,11 +1538,20 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
@log_function
def get_persisted_pdu(self, origin, event_id, do_auth=True):
""" Get a PDU from the database with given origin and id.
def get_persisted_pdu(self, origin, event_id):
"""Get an event from the database for the given server.
Args:
origin (str): hostname of server which is requesting the event; we
will check that the server is allowed to see it.
event_id (str): id of the event being requested
Returns:
Deferred: Results in a `Pdu`.
Deferred[EventBase|None]: None if we know nothing about the event;
otherwise the (possibly-redacted) event.
Raises:
AuthError if the server is not currently in the room
"""
event = yield self.store.get_event(
event_id,
@@ -1459,20 +1560,17 @@
)
if event:
if do_auth:
in_room = yield self.auth.check_host_in_room(
event.room_id,
origin
)
if not in_room:
raise AuthError(403, "Host not in room.")
events = yield self._filter_events_for_server(
origin, event.room_id, [event]
)
event = events[0]
in_room = yield self.auth.check_host_in_room(
event.room_id,
origin
)
if not in_room:
raise AuthError(403, "Host not in room.")
events = yield self._filter_events_for_server(
origin, event.room_id, [event]
)
event = events[0]
defer.returnValue(event)
else:
defer.returnValue(None)
@@ -1751,6 +1849,10 @@ class FederationHandler(BaseHandler):
min_depth=min_depth,
)
missing_events = yield self._filter_events_for_server(
origin, room_id, missing_events,
)
defer.returnValue(missing_events)
@defer.inlineCallbacks