Mirror of https://git.anonymousland.org/anonymousland/synapse.git (synced 2025-12-15 14:18:51 -05:00)

Merge remote-tracking branch 'upstream/release-v1.41'

Commit f285b4200c: 237 changed files with 9601 additions and 6005 deletions.

synapse/handlers/appservice.py

@@ -392,9 +392,6 @@ class ApplicationServicesHandler:
                         protocols[p].append(info)

         def _merge_instances(infos: List[JsonDict]) -> JsonDict:
             if not infos:
                 return {}

             # Merge the 'instances' lists of multiple results, but just take
             # the other fields from the first as they ought to be identical
             # copy the result so as not to corrupt the cached one

@@ -406,7 +403,9 @@ class ApplicationServicesHandler:
             return combined

-        return {p: _merge_instances(protocols[p]) for p in protocols.keys()}
+        return {
+            p: _merge_instances(protocols[p]) for p in protocols.keys() if protocols[p]
+        }

     async def _get_services_for_event(
         self, event: EventBase

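The guard added above means a protocol with an empty result list is now omitted entirely rather than merged into an empty entry. A standalone sketch of the behaviour (the sample appservice responses are invented, and _merge_instances is reconstructed from the comments in the hunk, not copied from Synapse):

def _merge_instances(infos):
    if not infos:
        return {}
    # take the non-instance fields from the first result...
    combined = dict(infos[0])
    # ...and concatenate the "instances" lists from every appservice response
    combined["instances"] = []
    for info in infos:
        combined["instances"].extend(info.get("instances", []))
    return combined

protocols = {
    "irc": [
        {"user_fields": ["nick"], "instances": [{"network_id": "a"}]},
        {"user_fields": ["nick"], "instances": [{"network_id": "b"}]},
    ],
    "gitter": [],  # no appservice responded: dropped by the new `if protocols[p]` guard
}
merged = {p: _merge_instances(protocols[p]) for p in protocols.keys() if protocols[p]}
assert "gitter" not in merged
assert [i["network_id"] for i in merged["irc"]["instances"]] == ["a", "b"]
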
synapse/handlers/auth.py

@@ -73,7 +73,7 @@ from synapse.util.stringutils import base62_encode
 from synapse.util.threepids import canonicalise_email

 if TYPE_CHECKING:
-    from synapse.rest.client.v1.login import LoginResponse
+    from synapse.rest.client.login import LoginResponse
     from synapse.server import HomeServer

 logger = logging.getLogger(__name__)

@@ -461,7 +461,7 @@ class AuthHandler(BaseHandler):

         If no auth flows have been completed successfully, raises an
         InteractiveAuthIncompleteError. To handle this, you can use
-        synapse.rest.client.v2_alpha._base.interactive_auth_handler as a
+        synapse.rest.client._base.interactive_auth_handler as a
         decorator.

         Args:

@@ -543,7 +543,7 @@ class AuthHandler(BaseHandler):
         # Note that the registration endpoint explicitly removes the
         # "initial_device_display_name" parameter if it is provided
         # without a "password" parameter. See the changes to
-        # synapse.rest.client.v2_alpha.register.RegisterRestServlet.on_POST
+        # synapse.rest.client.register.RegisterRestServlet.on_POST
         # in commit 544722bad23fc31056b9240189c3cbbbf0ffd3f9.
         if not clientdict:
             clientdict = session.clientdict

synapse/handlers/event_auth.py

@@ -213,7 +213,7 @@ class EventAuthHandler:

             raise AuthError(
                 403,
-                "You do not belong to any of the required rooms to join this room.",
+                "You do not belong to any of the required rooms/spaces to join this room.",
             )

     async def has_restricted_join_rules(

synapse/handlers/federation.py

@@ -42,6 +42,7 @@ from twisted.internet import defer

 from synapse import event_auth
 from synapse.api.constants import (
+    EventContentFields,
     EventTypes,
     Membership,
     RejectedReason,

@@ -108,21 +109,33 @@ soft_failed_event_counter = Counter(
 )


-@attr.s(slots=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
 class _NewEventInfo:
     """Holds information about a received event, ready for passing to _auth_and_persist_events

     Attributes:
         event: the received event

-        state: the state at that event
+        state: the state at that event, according to /state_ids from a remote
+            homeserver. Only populated for backfilled events which are going to be a
+            new backwards extremity.
+
+        claimed_auth_event_map: a map of (type, state_key) => event for the event's
+            claimed auth_events.
+
+            This can include events which have not yet been persisted, in the case that
+            we are backfilling a batch of events.
+
+            Note: May be incomplete: if we were unable to find all of the claimed auth
+            events. Also, treat the contents with caution: the events might also have
+            been rejected, might not yet have been authorized themselves, or they might
+            be in the wrong room.

-        auth_events: the auth_event map for that event
     """

-    event = attr.ib(type=EventBase)
-    state = attr.ib(type=Optional[Sequence[EventBase]], default=None)
-    auth_events = attr.ib(type=Optional[MutableStateMap[EventBase]], default=None)
+    event: EventBase
+    state: Optional[Sequence[EventBase]]
+    claimed_auth_event_map: StateMap[EventBase]


 class FederationHandler(BaseHandler):

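The decorator change above swaps explicit attr.ib declarations for auto_attribs, and freezes instances. A minimal illustration of the pattern (a made-up Point class, not Synapse code):

import attr

@attr.s(slots=True, frozen=True, auto_attribs=True)
class Point:
    # each annotated class attribute becomes an __init__ argument
    x: int
    y: int = 0  # defaults still work under auto_attribs

p = Point(x=1)
try:
    p.x = 2  # frozen=True makes instances immutable
except attr.exceptions.FrozenInstanceError:
    pass
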
@@ -207,8 +220,6 @@ class FederationHandler(BaseHandler):
         room_id = pdu.room_id
         event_id = pdu.event_id

-        logger.info("handling received PDU: %s", pdu)
-
         # We reprocess pdus when we have seen them only as outliers
         existing = await self.store.get_event(
             event_id, allow_none=True, allow_rejected=True

@@ -216,14 +227,19 @@ class FederationHandler(BaseHandler):

         # FIXME: Currently we fetch an event again when we already have it
         # if it has been marked as an outlier.

-        already_seen = existing and (
-            not existing.internal_metadata.is_outlier()
-            or pdu.internal_metadata.is_outlier()
-        )
-        if already_seen:
-            logger.debug("Already seen pdu")
-            return
+        if existing:
+            if not existing.internal_metadata.is_outlier():
+                logger.info(
+                    "Ignoring received event %s which we have already seen", event_id
+                )
+                return
+            if pdu.internal_metadata.is_outlier():
+                logger.info(
+                    "Ignoring received outlier %s which we already have as an outlier",
+                    event_id,
+                )
+                return
+            logger.info("De-outliering event %s", event_id)

         # do some initial sanity-checking of the event. In particular, make
         # sure it doesn't have hundreds of prev_events or auth_events, which

@@ -262,7 +278,12 @@ class FederationHandler(BaseHandler):

         state = None

-        # Get missing pdus if necessary.
+        # Check that the event passes auth based on the state at the event. This is
+        # done for events that are to be added to the timeline (non-outliers).
+        #
+        # Get missing pdus if necessary:
+        # - Fetching any missing prev events to fill in gaps in the graph
+        # - Fetching state if we have a hole in the graph
         if not pdu.internal_metadata.is_outlier():
             # We only backfill backwards to the min depth.
             min_depth = await self.get_min_depth_for_context(pdu.room_id)

@@ -313,7 +334,8 @@ class FederationHandler(BaseHandler):
                 "Found all missing prev_events",
             )

-        if prevs - seen:
+        missing_prevs = prevs - seen
+        if missing_prevs:
             # We've still not been able to get all of the prev_events for this event.
             #
             # In this case, we need to fall back to asking another server in the

@@ -341,8 +363,8 @@ class FederationHandler(BaseHandler):
             if sent_to_us_directly:
                 logger.warning(
                     "Rejecting: failed to fetch %d prev events: %s",
-                    len(prevs - seen),
-                    shortstr(prevs - seen),
+                    len(missing_prevs),
+                    shortstr(missing_prevs),
                 )
                 raise FederationError(
                     "ERROR",

@@ -355,9 +377,10 @@ class FederationHandler(BaseHandler):
             )

             logger.info(
-                "Event %s is missing prev_events: calculating state for a "
+                "Event %s is missing prev_events %s: calculating state for a "
                 "backwards extremity",
                 event_id,
+                shortstr(missing_prevs),
             )

             # Calculate the state after each of the previous events, and

@@ -375,7 +398,7 @@ class FederationHandler(BaseHandler):

             # Ask the remote server for the states we don't
             # know about
-            for p in prevs - seen:
+            for p in missing_prevs:
                 logger.info("Requesting state after missing prev_event %s", p)

                 with nested_logging_context(p):

@@ -432,6 +455,13 @@ class FederationHandler(BaseHandler):
                 affected=event_id,
             )

+        # A second round of checks for all events. Check that the event passes auth
+        # based on `auth_events`, this allows us to assert that the event would
+        # have been allowed at some point. If an event passes this check its OK
+        # for it to be used as part of a returned `/state` request, as either
+        # a) we received the event as part of the original join and so trust it, or
+        # b) we'll do a state resolution with existing state before it becomes
+        # part of the "current state", which adds more protection.
         await self._process_received_pdu(origin, pdu, state=state)

     async def _get_missing_events_for_pdu(

@@ -531,21 +561,14 @@ class FederationHandler(BaseHandler):
             logger.warning("Failed to get prev_events: %s", e)
             return

-        logger.info(
-            "Got %d prev_events: %s",
-            len(missing_events),
-            shortstr(missing_events),
-        )
+        logger.info("Got %d prev_events", len(missing_events))

         # We want to sort these by depth so we process them and
         # tell clients about them in order.
         missing_events.sort(key=lambda x: x.depth)

         for ev in missing_events:
-            logger.info(
-                "Handling received prev_event %s",
-                ev.event_id,
-            )
+            logger.info("Handling received prev_event %s", ev)
             with nested_logging_context(ev.event_id):
                 try:
                     await self.on_receive_pdu(origin, ev, sent_to_us_directly=False)

@@ -889,6 +912,79 @@ class FederationHandler(BaseHandler):
                 "resync_device_due_to_pdu", self._resync_device, event.sender
             )

+        await self._handle_marker_event(origin, event)
+
+    async def _handle_marker_event(self, origin: str, marker_event: EventBase):
+        """Handles backfilling the insertion event when we receive a marker
+        event that points to one.
+
+        Args:
+            origin: Origin of the event. Will be called to get the insertion event
+            marker_event: The event to process
+        """
+
+        if marker_event.type != EventTypes.MSC2716_MARKER:
+            # Not a marker event
+            return
+
+        if marker_event.rejected_reason is not None:
+            # Rejected event
+            return
+
+        # Skip processing a marker event if the room version doesn't
+        # support it.
+        room_version = await self.store.get_room_version(marker_event.room_id)
+        if not room_version.msc2716_historical:
+            return
+
+        logger.debug("_handle_marker_event: received %s", marker_event)
+
+        insertion_event_id = marker_event.content.get(
+            EventContentFields.MSC2716_MARKER_INSERTION
+        )
+
+        if insertion_event_id is None:
+            # Nothing to retrieve then (invalid marker)
+            return
+
+        logger.debug(
+            "_handle_marker_event: backfilling insertion event %s", insertion_event_id
+        )
+
+        await self._get_events_and_persist(
+            origin,
+            marker_event.room_id,
+            [insertion_event_id],
+        )
+
+        insertion_event = await self.store.get_event(
+            insertion_event_id, allow_none=True
+        )
+        if insertion_event is None:
+            logger.warning(
+                "_handle_marker_event: server %s didn't return insertion event %s for marker %s",
+                origin,
+                insertion_event_id,
+                marker_event.event_id,
+            )
+            return
+
+        logger.debug(
+            "_handle_marker_event: succesfully backfilled insertion event %s from marker event %s",
+            insertion_event,
+            marker_event,
+        )
+
+        await self.store.insert_insertion_extremity(
+            insertion_event_id, marker_event.room_id
+        )
+
+        logger.debug(
+            "_handle_marker_event: insertion extremity added for %s from marker event %s",
+            insertion_event,
+            marker_event,
+        )

     async def _resync_device(self, sender: str) -> None:
         """We have detected that the device list for the given user may be out
         of sync, so we try and resync them.

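The guard clauses at the top of _handle_marker_event can be read as a single predicate. A hedged restatement (the helper name looks_like_usable_marker is invented, the async room-version check is left out, and the constants are the ones imported in the first hunk of this file):

from synapse.api.constants import EventContentFields, EventTypes

def looks_like_usable_marker(event) -> bool:
    # not a marker event at all
    if event.type != EventTypes.MSC2716_MARKER:
        return False
    # rejected events are ignored
    if event.rejected_reason is not None:
        return False
    # an invalid marker carries no insertion event to backfill
    return event.content.get(EventContentFields.MSC2716_MARKER_INSERTION) is not None
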
@@ -1000,7 +1096,7 @@ class FederationHandler(BaseHandler):
                 _NewEventInfo(
                     event=ev,
                     state=events_to_state[e_id],
-                    auth_events={
+                    claimed_auth_event_map={
                         (
                             auth_events[a_id].type,
                             auth_events[a_id].state_key,

@@ -1057,9 +1153,19 @@ class FederationHandler(BaseHandler):
     async def _maybe_backfill_inner(
         self, room_id: str, current_depth: int, limit: int
     ) -> bool:
-        extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)
+        oldest_events_with_depth = (
+            await self.store.get_oldest_event_ids_with_depth_in_room(room_id)
+        )
+        insertion_events_to_be_backfilled = (
+            await self.store.get_insertion_event_backwards_extremities_in_room(room_id)
+        )
+        logger.debug(
+            "_maybe_backfill_inner: extremities oldest_events_with_depth=%s insertion_events_to_be_backfilled=%s",
+            oldest_events_with_depth,
+            insertion_events_to_be_backfilled,
+        )

-        if not extremities:
+        if not oldest_events_with_depth and not insertion_events_to_be_backfilled:
             logger.debug("Not backfilling as no extremeties found.")
             return False

@@ -1089,10 +1195,12 @@ class FederationHandler(BaseHandler):
         # state *before* the event, ignoring the special casing certain event
         # types have.

-        forward_events = await self.store.get_successor_events(list(extremities))
+        forward_event_ids = await self.store.get_successor_events(
+            list(oldest_events_with_depth)
+        )

         extremities_events = await self.store.get_events(
-            forward_events,
+            forward_event_ids,
             redact_behaviour=EventRedactBehaviour.AS_IS,
             get_prev_content=False,
         )

@@ -1106,10 +1214,19 @@ class FederationHandler(BaseHandler):
             redact=False,
             check_history_visibility_only=True,
         )
+        logger.debug(
+            "_maybe_backfill_inner: filtered_extremities %s", filtered_extremities
+        )

-        if not filtered_extremities:
+        if not filtered_extremities and not insertion_events_to_be_backfilled:
             return False

+        extremities = {
+            **oldest_events_with_depth,
+            # TODO: insertion_events_to_be_backfilled is currently skipping the filtered_extremities checks
+            **insertion_events_to_be_backfilled,
+        }
+
         # Check if we reached a point where we should start backfilling.
         sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
         max_depth = sorted_extremeties_tuple[0][1]

@@ -1643,10 +1760,8 @@ class FederationHandler(BaseHandler):
         for p, origin in room_queue:
             try:
                 logger.info(
-                    "Processing queued PDU %s which was received "
-                    "while we were joining %s",
-                    p.event_id,
-                    p.room_id,
+                    "Processing queued PDU %s which was received while we were joining",
+                    p,
                 )
                 with nested_logging_context(p.event_id):
                     await self.on_receive_pdu(origin, p, sent_to_us_directly=True)

@@ -2208,7 +2323,7 @@ class FederationHandler(BaseHandler):
         event: EventBase,
         context: EventContext,
         state: Optional[Iterable[EventBase]] = None,
-        auth_events: Optional[MutableStateMap[EventBase]] = None,
+        claimed_auth_event_map: Optional[StateMap[EventBase]] = None,
         backfilled: bool = False,
     ) -> None:
         """

@@ -2220,17 +2335,18 @@ class FederationHandler(BaseHandler):
             context:
                 The event context.

                 NB that this function potentially modifies it.
             state:
                 The state events used to check the event for soft-fail. If this is
                 not provided the current state events will be used.
-            auth_events:
-                Map from (event_type, state_key) to event
-
-                Normally, our calculated auth_events based on the state of the room
-                at the event's position in the DAG, though occasionally (eg if the
-                event is an outlier), may be the auth events claimed by the remote
-                server.
+            claimed_auth_event_map:
+                A map of (type, state_key) => event for the event's claimed auth_events.
+                Possibly incomplete, and possibly including events that are not yet
+                persisted, or authed, or in the right room.
+
+                Only populated where we may not already have persisted these events -
+                for example, when populating outliers.
+
             backfilled: True if the event was backfilled.
         """
         context = await self._check_event_auth(

@@ -2238,7 +2354,7 @@ class FederationHandler(BaseHandler):
             event,
             context,
             state=state,
-            auth_events=auth_events,
+            claimed_auth_event_map=claimed_auth_event_map,
             backfilled=backfilled,
         )

@@ -2302,7 +2418,7 @@ class FederationHandler(BaseHandler):
                 event,
                 res,
                 state=ev_info.state,
-                auth_events=ev_info.auth_events,
+                claimed_auth_event_map=ev_info.claimed_auth_event_map,
                 backfilled=backfilled,
             )
         return res

@@ -2568,7 +2684,7 @@ class FederationHandler(BaseHandler):
         event: EventBase,
         context: EventContext,
         state: Optional[Iterable[EventBase]] = None,
-        auth_events: Optional[MutableStateMap[EventBase]] = None,
+        claimed_auth_event_map: Optional[StateMap[EventBase]] = None,
         backfilled: bool = False,
     ) -> EventContext:
         """

@@ -2580,21 +2696,19 @@ class FederationHandler(BaseHandler):
             context:
                 The event context.

                 NB that this function potentially modifies it.
             state:
                 The state events used to check the event for soft-fail. If this is
                 not provided the current state events will be used.
-            auth_events:
-                Map from (event_type, state_key) to event
-
-                Normally, our calculated auth_events based on the state of the room
-                at the event's position in the DAG, though occasionally (eg if the
-                event is an outlier), may be the auth events claimed by the remote
-                server.
-
-                Also NB that this function adds entries to it.
+            claimed_auth_event_map:
+                A map of (type, state_key) => event for the event's claimed auth_events.
+                Possibly incomplete, and possibly including events that are not yet
+                persisted, or authed, or in the right room.
+
+                Only populated where we may not already have persisted these events -
+                for example, when populating outliers, or the state for a backwards
+                extremity.

-                If this is not provided, it is calculated from the previous state IDs.
             backfilled: True if the event was backfilled.

         Returns:

@@ -2603,7 +2717,12 @@ class FederationHandler(BaseHandler):
         room_version = await self.store.get_room_version_id(event.room_id)
         room_version_obj = KNOWN_ROOM_VERSIONS[room_version]

-        if not auth_events:
+        if claimed_auth_event_map:
+            # if we have a copy of the auth events from the event, use that as the
+            # basis for auth.
+            auth_events = claimed_auth_event_map
+        else:
+            # otherwise, we calculate what the auth events *should* be, and use that
             prev_state_ids = await context.get_prev_state_ids()
             auth_events_ids = self._event_auth_handler.compute_auth_events(
                 event, prev_state_ids, for_verification=True

@@ -2611,18 +2730,11 @@ class FederationHandler(BaseHandler):
             auth_events_x = await self.store.get_events(auth_events_ids)
             auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()}

-            # This is a hack to fix some old rooms where the initial join event
-            # didn't reference the create event in its auth events.
-            if event.type == EventTypes.Member and not event.auth_event_ids():
-                if len(event.prev_event_ids()) == 1 and event.depth < 5:
-                    c = await self.store.get_event(
-                        event.prev_event_ids()[0], allow_none=True
-                    )
-                    if c and c.type == EventTypes.Create:
-                        auth_events[(c.type, c.state_key)] = c
-
         try:
-            context = await self._update_auth_events_and_context_for_auth(
+            (
+                context,
+                auth_events_for_auth,
+            ) = await self._update_auth_events_and_context_for_auth(
                 origin, event, context, auth_events
             )
         except Exception:

@@ -2635,9 +2747,10 @@ class FederationHandler(BaseHandler):
                 "Ignoring failure and continuing processing of event.",
                 event.event_id,
             )
+            auth_events_for_auth = auth_events

         try:
-            event_auth.check(room_version_obj, event, auth_events=auth_events)
+            event_auth.check(room_version_obj, event, auth_events=auth_events_for_auth)
         except AuthError as e:
             logger.warning("Failed auth resolution for %r because %s", event, e)
             context.rejected = RejectedReason.AUTH_ERROR

@@ -2662,8 +2775,8 @@ class FederationHandler(BaseHandler):
         origin: str,
         event: EventBase,
         context: EventContext,
-        auth_events: MutableStateMap[EventBase],
-    ) -> EventContext:
+        input_auth_events: StateMap[EventBase],
+    ) -> Tuple[EventContext, StateMap[EventBase]]:
         """Helper for _check_event_auth. See there for docs.

         Checks whether a given event has the expected auth events. If it

@@ -2680,7 +2793,7 @@ class FederationHandler(BaseHandler):
             event:
             context:

-            auth_events:
+            input_auth_events:
                 Map from (event_type, state_key) to event

                 Normally, our calculated auth_events based on the state of the room

@@ -2688,11 +2801,12 @@ class FederationHandler(BaseHandler):
                 event is an outlier), may be the auth events claimed by the remote
                 server.

-                Also NB that this function adds entries to it.
-
         Returns:
-            updated context
+            updated context, updated auth event map
         """
+        # take a copy of input_auth_events before we modify it.
+        auth_events: MutableStateMap[EventBase] = dict(input_auth_events)
+
         event_auth_events = set(event.auth_event_ids())

         # missing_auth is the set of the event's auth_events which we don't yet have

@@ -2721,7 +2835,7 @@ class FederationHandler(BaseHandler):
                 # The other side isn't around or doesn't implement the
                 # endpoint, so lets just bail out.
                 logger.info("Failed to get event auth from remote: %s", e1)
-                return context
+                return context, auth_events

         seen_remotes = await self.store.have_seen_events(
             event.room_id, [e.event_id for e in remote_auth_chain]

@@ -2752,7 +2866,10 @@ class FederationHandler(BaseHandler):
                         await self.state_handler.compute_event_context(e)
                     )
                     await self._auth_and_persist_event(
-                        origin, e, missing_auth_event_context, auth_events=auth
+                        origin,
+                        e,
+                        missing_auth_event_context,
+                        claimed_auth_event_map=auth,
                     )

                     if e.event_id in event_auth_events:

@@ -2770,14 +2887,14 @@ class FederationHandler(BaseHandler):
             # obviously be empty
             # (b) alternatively, why don't we do it earlier?
             logger.info("Skipping auth_event fetch for outlier")
-            return context
+            return context, auth_events

         different_auth = event_auth_events.difference(
             e.event_id for e in auth_events.values()
         )

         if not different_auth:
-            return context
+            return context, auth_events

         logger.info(
             "auth_events refers to events which are not in our calculated auth "

@@ -2803,7 +2920,7 @@ class FederationHandler(BaseHandler):
             # XXX: should we reject the event in this case? It feels like we should,
             # but then shouldn't we also do so if we've failed to fetch any of the
             # auth events?
-            return context
+            return context, auth_events

         # now we state-resolve between our own idea of the auth events, and the remote's
         # idea of them.

@@ -2833,7 +2950,7 @@ class FederationHandler(BaseHandler):
             event, context, auth_events
         )

-        return context
+        return context, auth_events

     async def _update_context_for_auth_events(
         self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]

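Throughout these hunks the claimed auth events travel as a map keyed on (type, state_key), mirroring the construction in the _NewEventInfo hunk above. A one-line sketch of building such a map (claimed_auth_events here is an assumed list of EventBase objects, not a name from the diff):

claimed_auth_event_map = {(e.type, e.state_key): e for e in claimed_auth_events}
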
synapse/handlers/identity.py

@@ -824,6 +824,7 @@ class IdentityHandler(BaseHandler):
         room_avatar_url: str,
         room_join_rules: str,
         room_name: str,
+        room_type: Optional[str],
         inviter_display_name: str,
         inviter_avatar_url: str,
         id_access_token: Optional[str] = None,

@@ -843,6 +844,7 @@ class IdentityHandler(BaseHandler):
                 notifications.
             room_join_rules: The join rules of the email (e.g. "public").
             room_name: The m.room.name of the room.
+            room_type: The type of the room from its m.room.create event (e.g "m.space").
             inviter_display_name: The current display name of the
                 inviter.
             inviter_avatar_url: The URL of the inviter's avatar.

@@ -869,6 +871,10 @@ class IdentityHandler(BaseHandler):
             "sender_display_name": inviter_display_name,
             "sender_avatar_url": inviter_avatar_url,
         }

+        if room_type is not None:
+            invite_config["org.matrix.msc3288.room_type"] = room_type
+
         # If a custom web client location is available, include it in the request.
         if self._web_client_location:
             invite_config["org.matrix.web_client_location"] = self._web_client_location

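The effect of the new room_type parameter is one extra, unstable-prefixed key in the invite payload sent to the identity server. A sketch with invented values:

invite_config = {
    "sender_display_name": "Alice",
    "sender_avatar_url": "mxc://example.org/abc123",
}
room_type = "m.space"  # taken from the room's m.room.create content, or None
if room_type is not None:
    # MSC3288: expose the room type to the identity server
    invite_config["org.matrix.msc3288.room_type"] = room_type
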
synapse/handlers/presence.py

@@ -1184,8 +1184,7 @@ class PresenceHandler(BasePresenceHandler):
         new_fields = {"state": presence}

         if not ignore_status_msg:
-            msg = status_msg if presence != PresenceState.OFFLINE else None
-            new_fields["status_msg"] = msg
+            new_fields["status_msg"] = status_msg

         if presence == PresenceState.ONLINE or (
             presence == PresenceState.BUSY and self._busy_presence_enabled

@@ -1478,7 +1477,7 @@ def format_user_presence_state(
     content["user_id"] = state.user_id
     if state.last_active_ts:
         content["last_active_ago"] = now - state.last_active_ts
-    if state.status_msg and state.state != PresenceState.OFFLINE:
+    if state.status_msg:
         content["status_msg"] = state.status_msg
     if state.state == PresenceState.ONLINE:
         content["currently_active"] = state.currently_active

@@ -1840,9 +1839,7 @@ def handle_timeout(
             # don't set them as offline.
             sync_or_active = max(state.last_user_sync_ts, state.last_active_ts)
             if now - sync_or_active > SYNC_ONLINE_TIMEOUT:
-                state = state.copy_and_replace(
-                    state=PresenceState.OFFLINE, status_msg=None
-                )
+                state = state.copy_and_replace(state=PresenceState.OFFLINE)
                 changed = True
         else:
             # We expect to be poked occasionally by the other side.

@@ -1850,7 +1847,7 @@ def handle_timeout(
         # no one gets stuck online forever.
         if now - state.last_federation_update_ts > FEDERATION_TIMEOUT:
             # The other side seems to have disappeared.
-            state = state.copy_and_replace(state=PresenceState.OFFLINE, status_msg=None)
+            state = state.copy_and_replace(state=PresenceState.OFFLINE)
             changed = True

     return state if changed else None

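The net effect of these presence hunks: status_msg is no longer wiped when a user goes (or times out to) offline, and format_user_presence_state now includes it regardless of presence state. A rough sketch of the new behaviour (assuming UserPresenceState from synapse.api.presence and PresenceState from synapse.api.constants; this is not Synapse's actual test code):

state = UserPresenceState.default("@user:example.org")
state = state.copy_and_replace(state=PresenceState.ONLINE, status_msg="on holiday")
state = state.copy_and_replace(state=PresenceState.OFFLINE)
# previously this transition also passed status_msg=None
assert state.status_msg == "on holiday"
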
synapse/handlers/receipts.py

@@ -70,7 +70,8 @@ class ReceiptsHandler(BaseHandler):
             )
             if not is_in_room:
                 logger.info(
-                    "Ignoring receipt from %s as we're not in the room",
+                    "Ignoring receipt for room %r from server %s as we're not in the room",
+                    room_id,
                     origin,
                 )
                 continue

@@ -188,7 +189,14 @@ class ReceiptEventSource:

         new_users = {}
         for rr_user_id, user_rr in m_read.items():
-            hidden = user_rr.get("hidden", None)
+            try:
+                hidden = user_rr.get("hidden")
+            except AttributeError:
+                # Due to https://github.com/matrix-org/synapse/issues/10376
+                # there are cases where user_rr is a string, in those cases
+                # we just ignore the read receipt
+                continue
+
             if hidden is not True or rr_user_id == user_id:
                 new_users[rr_user_id] = user_rr.copy()
                 # If hidden has a value replace hidden with the correct prefixed key

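Why a try/except rather than an isinstance check: per the linked issue, user_rr is occasionally a plain string instead of a dict, and calling .get on a str raises AttributeError. A minimal standalone sketch:

user_rr = "not-a-dict"  # malformed receipt data, as described in issue 10376
try:
    hidden = user_rr.get("hidden")  # AttributeError: str has no .get
except AttributeError:
    hidden = None  # in the handler this receipt is simply skipped (continue)
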
synapse/handlers/room_list.py

@@ -356,6 +356,12 @@ class RoomListHandler(BaseHandler):
         include_all_networks: bool = False,
         third_party_instance_id: Optional[str] = None,
     ) -> JsonDict:
+        """Get the public room list from remote server
+
+        Raises:
+            SynapseError
+        """
+
         if not self.enable_room_list_search:
             return {"chunk": [], "total_room_count_estimate": 0}

@@ -395,13 +401,16 @@ class RoomListHandler(BaseHandler):
             limit = None
             since_token = None

-        res = await self._get_remote_list_cached(
-            server_name,
-            limit=limit,
-            since_token=since_token,
-            include_all_networks=include_all_networks,
-            third_party_instance_id=third_party_instance_id,
-        )
+        try:
+            res = await self._get_remote_list_cached(
+                server_name,
+                limit=limit,
+                since_token=since_token,
+                include_all_networks=include_all_networks,
+                third_party_instance_id=third_party_instance_id,
+            )
+        except (RequestSendFailed, HttpResponseException):
+            raise SynapseError(502, "Failed to fetch room list")

         if search_filter:
             res = {

@@ -423,20 +432,21 @@ class RoomListHandler(BaseHandler):
         include_all_networks: bool = False,
         third_party_instance_id: Optional[str] = None,
     ) -> JsonDict:
         """Wrapper around FederationClient.get_public_rooms that caches the
         result.
         """

         repl_layer = self.hs.get_federation_client()
         if search_filter:
             # We can't cache when asking for search
-            return await repl_layer.get_public_rooms(
-                server_name,
-                limit=limit,
-                since_token=since_token,
-                search_filter=search_filter,
-                include_all_networks=include_all_networks,
-                third_party_instance_id=third_party_instance_id,
-            )
+            try:
+                return await repl_layer.get_public_rooms(
+                    server_name,
+                    limit=limit,
+                    since_token=since_token,
+                    search_filter=search_filter,
+                    include_all_networks=include_all_networks,
+                    third_party_instance_id=third_party_instance_id,
+                )
+            except (RequestSendFailed, HttpResponseException):
+                raise SynapseError(502, "Failed to fetch room list")

         key = (
             server_name,

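Both call sites now translate transport-level failures into a well-formed 502 for the client rather than an unhandled exception. A hedged, self-contained sketch of the pattern (the wrapper function name is invented; the exception classes are the ones caught above):

from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError

async def fetch_remote_room_list(handler, server_name: str):
    try:
        return await handler._get_remote_list_cached(server_name)
    except (RequestSendFailed, HttpResponseException):
        # surface a gateway error instead of a 500 from an unhandled exception
        raise SynapseError(502, "Failed to fetch room list")
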
synapse/handlers/room_member.py

@@ -19,7 +19,12 @@ from http import HTTPStatus
 from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple

 from synapse import types
-from synapse.api.constants import AccountDataTypes, EventTypes, Membership
+from synapse.api.constants import (
+    AccountDataTypes,
+    EventContentFields,
+    EventTypes,
+    Membership,
+)
 from synapse.api.errors import (
     AuthError,
     Codes,

@@ -1237,6 +1242,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         if room_name_event:
             room_name = room_name_event.content.get("name", "")

+        room_type = None
+        room_create_event = room_state.get((EventTypes.Create, ""))
+        if room_create_event:
+            room_type = room_create_event.content.get(EventContentFields.ROOM_TYPE)
+
         room_join_rules = ""
         join_rules_event = room_state.get((EventTypes.JoinRules, ""))
         if join_rules_event:

@@ -1263,6 +1273,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             room_avatar_url=room_avatar_url,
             room_join_rules=room_join_rules,
             room_name=room_name,
+            room_type=room_type,
             inviter_display_name=inviter_display_name,
             inviter_avatar_url=inviter_avatar_url,
             id_access_token=id_access_token,

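For reference, EventContentFields.ROOM_TYPE is the "type" key of the m.room.create content, so the lookup added above yields "m.space" for a space and None for an ordinary room. A hypothetical create-event content:

create_content = {
    "creator": "@alice:example.org",
    "room_version": "6",
    "type": "m.space",  # absent entirely for a normal room
}
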
synapse/handlers/room_summary.py (new file, 1171 lines)

File diff suppressed because it is too large.

synapse/handlers/send_email.py

@@ -16,7 +16,12 @@ import email.utils
 import logging
 from email.mime.multipart import MIMEMultipart
 from email.mime.text import MIMEText
-from typing import TYPE_CHECKING
+from io import BytesIO
+from typing import TYPE_CHECKING, Optional
+
+from twisted.internet.defer import Deferred
+from twisted.internet.interfaces import IReactorTCP
+from twisted.mail.smtp import ESMTPSenderFactory

 from synapse.logging.context import make_deferred_yieldable

@@ -26,19 +31,75 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)


+async def _sendmail(
+    reactor: IReactorTCP,
+    smtphost: str,
+    smtpport: int,
+    from_addr: str,
+    to_addr: str,
+    msg_bytes: bytes,
+    username: Optional[bytes] = None,
+    password: Optional[bytes] = None,
+    require_auth: bool = False,
+    require_tls: bool = False,
+    tls_hostname: Optional[str] = None,
+) -> None:
+    """A simple wrapper around ESMTPSenderFactory, to allow substitution in tests
+
+    Params:
+        reactor: reactor to use to make the outbound connection
+        smtphost: hostname to connect to
+        smtpport: port to connect to
+        from_addr: "From" address for email
+        to_addr: "To" address for email
+        msg_bytes: Message content
+        username: username to authenticate with, if auth is enabled
+        password: password to give when authenticating
+        require_auth: if auth is not offered, fail the request
+        require_tls: if TLS is not offered, fail the reqest
+        tls_hostname: TLS hostname to check for. None to disable TLS.
+    """
+    msg = BytesIO(msg_bytes)
+
+    d: "Deferred[object]" = Deferred()
+
+    factory = ESMTPSenderFactory(
+        username,
+        password,
+        from_addr,
+        to_addr,
+        msg,
+        d,
+        heloFallback=True,
+        requireAuthentication=require_auth,
+        requireTransportSecurity=require_tls,
+        hostname=tls_hostname,
+    )
+
+    # the IReactorTCP interface claims host has to be a bytes, which seems to be wrong
+    reactor.connectTCP(smtphost, smtpport, factory, timeout=30, bindAddress=None)  # type: ignore[arg-type]
+
+    await make_deferred_yieldable(d)
+
+
 class SendEmailHandler:
     def __init__(self, hs: "HomeServer"):
         self.hs = hs

-        self._sendmail = hs.get_sendmail()
         self._reactor = hs.get_reactor()

         self._from = hs.config.email.email_notif_from
         self._smtp_host = hs.config.email.email_smtp_host
         self._smtp_port = hs.config.email.email_smtp_port
-        self._smtp_user = hs.config.email.email_smtp_user
-        self._smtp_pass = hs.config.email.email_smtp_pass
+
+        user = hs.config.email.email_smtp_user
+        self._smtp_user = user.encode("utf-8") if user is not None else None
+        passwd = hs.config.email.email_smtp_pass
+        self._smtp_pass = passwd.encode("utf-8") if passwd is not None else None
         self._require_transport_security = hs.config.email.require_transport_security
+        self._enable_tls = hs.config.email.enable_smtp_tls
+
+        self._sendmail = _sendmail

     async def send_email(
         self,

@@ -82,17 +143,16 @@ class SendEmailHandler:

         logger.info("Sending email to %s" % email_address)

-        await make_deferred_yieldable(
-            self._sendmail(
-                self._smtp_host,
-                raw_from,
-                raw_to,
-                multipart_msg.as_string().encode("utf8"),
-                reactor=self._reactor,
-                port=self._smtp_port,
-                requireAuthentication=self._smtp_user is not None,
-                username=self._smtp_user,
-                password=self._smtp_pass,
-                requireTransportSecurity=self._require_transport_security,
-            )
-        )
+        await self._sendmail(
+            self._reactor,
+            self._smtp_host,
+            self._smtp_port,
+            raw_from,
+            raw_to,
+            multipart_msg.as_string().encode("utf8"),
+            username=self._smtp_user,
+            password=self._smtp_pass,
+            require_auth=self._smtp_user is not None,
+            require_tls=self._require_transport_security,
+            tls_hostname=self._smtp_host if self._enable_tls else None,
+        )

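Because SendEmailHandler now stores the module-level _sendmail on self, tests can swap in a stub with the same signature instead of patching twisted internals. A hedged sketch (handler is an assumed SendEmailHandler instance; this is not Synapse's actual test helper):

sent = []

async def fake_sendmail(
    reactor, smtphost, smtpport, from_addr, to_addr, msg_bytes, **kwargs
):
    # record the message instead of opening a real SMTP connection
    sent.append((to_addr, msg_bytes))

handler._sendmail = fake_sendmail
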
synapse/handlers/space_summary.py (deleted, 667 lines; superseded by the new synapse/handlers/room_summary.py above)

@@ -1,667 +0,0 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools
import logging
import re
from collections import deque
from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Set, Tuple

import attr

from synapse.api.constants import (
    EventContentFields,
    EventTypes,
    HistoryVisibility,
    JoinRules,
    Membership,
    RoomTypes,
)
from synapse.events import EventBase
from synapse.events.utils import format_event_for_client_v2
from synapse.types import JsonDict

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

# number of rooms to return. We'll stop once we hit this limit.
MAX_ROOMS = 50

# max number of events to return per room.
MAX_ROOMS_PER_SPACE = 50

# max number of federation servers to hit per room
MAX_SERVERS_PER_SPACE = 3


class SpaceSummaryHandler:
    def __init__(self, hs: "HomeServer"):
        self._clock = hs.get_clock()
        self._auth = hs.get_auth()
        self._event_auth_handler = hs.get_event_auth_handler()
        self._store = hs.get_datastore()
        self._event_serializer = hs.get_event_client_serializer()
        self._server_name = hs.hostname
        self._federation_client = hs.get_federation_client()

    async def get_space_summary(
        self,
        requester: str,
        room_id: str,
        suggested_only: bool = False,
        max_rooms_per_space: Optional[int] = None,
    ) -> JsonDict:
        """
        Implementation of the space summary C-S API

        Args:
            requester: user id of the user making this request

            room_id: room id to start the summary at

            suggested_only: whether we should only return children with the "suggested"
                flag set.

            max_rooms_per_space: an optional limit on the number of child rooms we will
                return. This does not apply to the root room (ie, room_id), and
                is overridden by MAX_ROOMS_PER_SPACE.

        Returns:
            summary dict to return
        """
        # first of all, check that the user is in the room in question (or it's
        # world-readable)
        await self._auth.check_user_in_room_or_world_readable(room_id, requester)

        # the queue of rooms to process
        room_queue = deque((_RoomQueueEntry(room_id, ()),))

        # rooms we have already processed
        processed_rooms: Set[str] = set()

        # events we have already processed. We don't necessarily have their event ids,
        # so instead we key on (room id, state key)
        processed_events: Set[Tuple[str, str]] = set()

        rooms_result: List[JsonDict] = []
        events_result: List[JsonDict] = []

        while room_queue and len(rooms_result) < MAX_ROOMS:
            queue_entry = room_queue.popleft()
            room_id = queue_entry.room_id
            if room_id in processed_rooms:
                # already done this room
                continue

            logger.debug("Processing room %s", room_id)

            is_in_room = await self._store.is_host_joined(room_id, self._server_name)

            # The client-specified max_rooms_per_space limit doesn't apply to the
            # room_id specified in the request, so we ignore it if this is the
            # first room we are processing.
            max_children = max_rooms_per_space if processed_rooms else None

            if is_in_room:
                room, events = await self._summarize_local_room(
                    requester, None, room_id, suggested_only, max_children
                )

                logger.debug(
                    "Query of local room %s returned events %s",
                    room_id,
                    ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in events],
                )

                if room:
                    rooms_result.append(room)
            else:
                fed_rooms, fed_events = await self._summarize_remote_room(
                    queue_entry,
                    suggested_only,
                    max_children,
                    exclude_rooms=processed_rooms,
                )

                # The results over federation might include rooms that the we,
                # as the requesting server, are allowed to see, but the requesting
                # user is not permitted see.
                #
                # Filter the returned results to only what is accessible to the user.
                room_ids = set()
                events = []
                for room in fed_rooms:
                    fed_room_id = room.get("room_id")
                    if not fed_room_id or not isinstance(fed_room_id, str):
                        continue

                    # The room should only be included in the summary if:
                    #     a. the user is in the room;
                    #     b. the room is world readable; or
                    #     c. the user could join the room, e.g. the join rules
                    #        are set to public or the user is in a space that
                    #        has been granted access to the room.
                    #
                    # Note that we know the user is not in the root room (which is
                    # why the remote call was made in the first place), but the user
                    # could be in one of the children rooms and we just didn't know
                    # about the link.

                    # The API doesn't return the room version so assume that a
                    # join rule of knock is valid.
                    include_room = (
                        room.get("join_rules") in (JoinRules.PUBLIC, JoinRules.KNOCK)
                        or room.get("world_readable") is True
                    )

                    # Check if the user is a member of any of the allowed spaces
                    # from the response.
                    allowed_rooms = room.get("allowed_spaces")
                    if (
                        not include_room
                        and allowed_rooms
                        and isinstance(allowed_rooms, list)
                    ):
                        include_room = await self._event_auth_handler.is_user_in_rooms(
                            allowed_rooms, requester
                        )

                    # Finally, if this isn't the requested room, check ourselves
                    # if we can access the room.
                    if not include_room and fed_room_id != queue_entry.room_id:
                        include_room = await self._is_room_accessible(
                            fed_room_id, requester, None
                        )

                    # The user can see the room, include it!
                    if include_room:
                        rooms_result.append(room)
                        room_ids.add(fed_room_id)

                    # All rooms returned don't need visiting again (even if the user
                    # didn't have access to them).
                    processed_rooms.add(fed_room_id)

                for event in fed_events:
                    if event.get("room_id") in room_ids:
                        events.append(event)

                logger.debug(
                    "Query of %s returned rooms %s, events %s",
                    room_id,
                    [room.get("room_id") for room in fed_rooms],
                    ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in fed_events],
                )

            # the room we queried may or may not have been returned, but don't process
            # it again, anyway.
            processed_rooms.add(room_id)

            # XXX: is it ok that we blindly iterate through any events returned by
            #   a remote server, whether or not they actually link to any rooms in our
            #   tree?
            for ev in events:
                # remote servers might return events we have already processed
                # (eg, Dendrite returns inward pointers as well as outward ones), so
                # we need to filter them out, to avoid returning duplicate links to the
                # client.
                ev_key = (ev["room_id"], ev["state_key"])
                if ev_key in processed_events:
                    continue
                events_result.append(ev)

                # add the child to the queue. we have already validated
                # that the vias are a list of server names.
                room_queue.append(
                    _RoomQueueEntry(ev["state_key"], ev["content"]["via"])
                )
                processed_events.add(ev_key)

        # Before returning to the client, remove the allowed_spaces key for any
        # rooms.
        for room in rooms_result:
            room.pop("allowed_spaces", None)

        return {"rooms": rooms_result, "events": events_result}

    async def federation_space_summary(
        self,
        origin: str,
        room_id: str,
        suggested_only: bool,
        max_rooms_per_space: Optional[int],
        exclude_rooms: Iterable[str],
    ) -> JsonDict:
        """
        Implementation of the space summary Federation API

        Args:
            origin: The server requesting the spaces summary.

            room_id: room id to start the summary at

            suggested_only: whether we should only return children with the "suggested"
                flag set.

            max_rooms_per_space: an optional limit on the number of child rooms we will
                return. Unlike the C-S API, this applies to the root room (room_id).
                It is clipped to MAX_ROOMS_PER_SPACE.

            exclude_rooms: a list of rooms to skip over (presumably because the
                calling server has already seen them).

        Returns:
            summary dict to return
        """
        # the queue of rooms to process
        room_queue = deque((room_id,))

        # the set of rooms that we should not walk further. Initialise it with the
        # excluded-rooms list; we will add other rooms as we process them so that
        # we do not loop.
        processed_rooms: Set[str] = set(exclude_rooms)

        rooms_result: List[JsonDict] = []
        events_result: List[JsonDict] = []

        while room_queue and len(rooms_result) < MAX_ROOMS:
            room_id = room_queue.popleft()
            if room_id in processed_rooms:
                # already done this room
                continue

            logger.debug("Processing room %s", room_id)

            room, events = await self._summarize_local_room(
                None, origin, room_id, suggested_only, max_rooms_per_space
            )

            processed_rooms.add(room_id)

            if room:
                rooms_result.append(room)
                events_result.extend(events)

                # add any children to the queue
                room_queue.extend(edge_event["state_key"] for edge_event in events)

        return {"rooms": rooms_result, "events": events_result}

    async def _summarize_local_room(
        self,
        requester: Optional[str],
        origin: Optional[str],
        room_id: str,
        suggested_only: bool,
        max_children: Optional[int],
    ) -> Tuple[Optional[JsonDict], Sequence[JsonDict]]:
        """
        Generate a room entry and a list of event entries for a given room.

        Args:
            requester:
                The user requesting the summary, if it is a local request. None
                if this is a federation request.
            origin:
                The server requesting the summary, if it is a federation request.
                None if this is a local request.
            room_id: The room ID to summarize.
            suggested_only: True if only suggested children should be returned.
                Otherwise, all children are returned.
            max_children:
                The maximum number of children rooms to include. This is capped
                to a server-set limit.

        Returns:
            A tuple of:
                The room information, if the room should be returned to the
                user. None, otherwise.

                An iterable of the sorted children events. This may be limited
                to a maximum size or may include all children.
        """
        if not await self._is_room_accessible(room_id, requester, origin):
            return None, ()

        room_entry = await self._build_room_entry(room_id)

        # If the room is not a space, return just the room information.
        if room_entry.get("room_type") != RoomTypes.SPACE:
            return room_entry, ()

        # Otherwise, look for child rooms/spaces.
        child_events = await self._get_child_events(room_id)

        if suggested_only:
            # we only care about suggested children
            child_events = filter(_is_suggested_child_event, child_events)

        if max_children is None or max_children > MAX_ROOMS_PER_SPACE:
            max_children = MAX_ROOMS_PER_SPACE

        now = self._clock.time_msec()
        events_result: List[JsonDict] = []
        for edge_event in itertools.islice(child_events, max_children):
            events_result.append(
                await self._event_serializer.serialize_event(
                    edge_event,
                    time_now=now,
                    event_format=format_event_for_client_v2,
                )
            )

        return room_entry, events_result

    async def _summarize_remote_room(
        self,
        room: "_RoomQueueEntry",
        suggested_only: bool,
        max_children: Optional[int],
        exclude_rooms: Iterable[str],
    ) -> Tuple[Sequence[JsonDict], Sequence[JsonDict]]:
        """
        Request room entries and a list of event entries for a given room by querying a remote server.

        Args:
            room: The room to summarize.
            suggested_only: True if only suggested children should be returned.
                Otherwise, all children are returned.
            max_children:
                The maximum number of children rooms to include. This is capped
                to a server-set limit.
            exclude_rooms:
                Rooms IDs which do not need to be summarized.

        Returns:
            A tuple of:
                An iterable of rooms.

                An iterable of the sorted children events. This may be limited
                to a maximum size or may include all children.
        """
        room_id = room.room_id
        logger.info("Requesting summary for %s via %s", room_id, room.via)

        # we need to make the exclusion list json-serialisable
        exclude_rooms = list(exclude_rooms)

        via = itertools.islice(room.via, MAX_SERVERS_PER_SPACE)
        try:
            res = await self._federation_client.get_space_summary(
                via,
                room_id,
                suggested_only=suggested_only,
                max_rooms_per_space=max_children,
                exclude_rooms=exclude_rooms,
            )
        except Exception as e:
            logger.warning(
                "Unable to get summary of %s via federation: %s",
                room_id,
                e,
                exc_info=logger.isEnabledFor(logging.DEBUG),
            )
            return (), ()

        return res.rooms, tuple(
            ev.data for ev in res.events if ev.event_type == EventTypes.SpaceChild
        )

    async def _is_room_accessible(
        self, room_id: str, requester: Optional[str], origin: Optional[str]
    ) -> bool:
        """
        Calculate whether the room should be shown in the spaces summary.

        It should be included if:

        * The requester is joined or can join the room (per MSC3173).
        * The origin server has any user that is joined or can join the room.
        * The history visibility is set to world readable.

        Args:
            room_id: The room ID to summarize.
            requester:
                The user requesting the summary, if it is a local request. None
                if this is a federation request.
            origin:
                The server requesting the summary, if it is a federation request.
                None if this is a local request.

        Returns:
            True if the room should be included in the spaces summary.
        """
        state_ids = await self._store.get_current_state_ids(room_id)

        # If there's no state for the room, it isn't known.
        if not state_ids:
            # The user might have a pending invite for the room.
            if requester and await self._store.get_invite_for_local_user_in_room(
                requester, room_id
            ):
                return True

            logger.info("room %s is unknown, omitting from summary", room_id)
            return False

        room_version = await self._store.get_room_version(room_id)

        # Include the room if it has join rules of public or knock.
        join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""))
        if join_rules_event_id:
            join_rules_event = await self._store.get_event(join_rules_event_id)
            join_rule = join_rules_event.content.get("join_rule")
            if join_rule == JoinRules.PUBLIC or (
                room_version.msc2403_knocking and join_rule == JoinRules.KNOCK
            ):
                return True

        # Include the room if it is peekable.
        hist_vis_event_id = state_ids.get((EventTypes.RoomHistoryVisibility, ""))
        if hist_vis_event_id:
            hist_vis_ev = await self._store.get_event(hist_vis_event_id)
            hist_vis = hist_vis_ev.content.get("history_visibility")
            if hist_vis == HistoryVisibility.WORLD_READABLE:
                return True

        # Otherwise we need to check information specific to the user or server.

        # If we have an authenticated requesting user, check if they are a member
        # of the room (or can join the room).
        if requester:
            member_event_id = state_ids.get((EventTypes.Member, requester), None)

            # If they're in the room they can see info on it.
            if member_event_id:
                member_event = await self._store.get_event(member_event_id)
                if member_event.membership in (Membership.JOIN, Membership.INVITE):
                    return True

            # Otherwise, check if they should be allowed access via membership in a space.
            if await self._event_auth_handler.has_restricted_join_rules(
                state_ids, room_version
            ):
                allowed_rooms = (
                    await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
                )
                if await self._event_auth_handler.is_user_in_rooms(
                    allowed_rooms, requester
                ):
                    return True

        # If this is a request over federation, check if the host is in the room or
        # has a user who could join the room.
        elif origin:
            if await self._event_auth_handler.check_host_in_room(
                room_id, origin
            ) or await self._store.is_host_invited(room_id, origin):
                return True

            # Alternately, if the host has a user in any of the spaces specified
            # for access, then the host can see this room (and should do filtering
            # if the requester cannot see it).
            if await self._event_auth_handler.has_restricted_join_rules(
                state_ids, room_version
            ):
                allowed_rooms = (
                    await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
                )
                for space_id in allowed_rooms:
                    if await self._event_auth_handler.check_host_in_room(
                        space_id, origin
                    ):
                        return True

        logger.info(
            "room %s is unpeekable and requester %s is not a member / not allowed to join, omitting from summary",
            room_id,
            requester or origin,
        )
        return False

    async def _build_room_entry(self, room_id: str) -> JsonDict:
        """Generate en entry suitable for the 'rooms' list in the summary response"""
        stats = await self._store.get_room_with_stats(room_id)

        # currently this should be impossible because we call
        # check_user_in_room_or_world_readable on the room before we get here, so
        # there should always be an entry
        assert stats is not None, "unable to retrieve stats for %s" % (room_id,)

        current_state_ids = await self._store.get_current_state_ids(room_id)
        create_event = await self._store.get_event(
            current_state_ids[(EventTypes.Create, "")]
        )

        room_version = await self._store.get_room_version(room_id)
        allowed_rooms = None
        if await self._event_auth_handler.has_restricted_join_rules(
            current_state_ids, room_version
        ):
            allowed_rooms = await self._event_auth_handler.get_rooms_that_allow_join(
                current_state_ids
            )

        entry = {
            "room_id": stats["room_id"],
            "name": stats["name"],
            "topic": stats["topic"],
            "canonical_alias": stats["canonical_alias"],
            "num_joined_members": stats["joined_members"],
            "avatar_url": stats["avatar"],
            "join_rules": stats["join_rules"],
            "world_readable": (
                stats["history_visibility"] == HistoryVisibility.WORLD_READABLE
            ),
            "guest_can_join": stats["guest_access"] == "can_join",
            "creation_ts": create_event.origin_server_ts,
            "room_type": create_event.content.get(EventContentFields.ROOM_TYPE),
            "allowed_spaces": allowed_rooms,
        }

        # Filter out Nones – rather omit the field altogether
        room_entry = {k: v for k, v in entry.items() if v is not None}

        return room_entry

    async def _get_child_events(self, room_id: str) -> Iterable[EventBase]:
        """
        Get the child events for a given room.

        The returned results are sorted for stability.

        Args:
            room_id: The room id to get the children of.

        Returns:
            An iterable of sorted child events.
        """

        # look for child rooms/spaces.
        current_state_ids = await self._store.get_current_state_ids(room_id)

        events = await self._store.get_events_as_list(
            [
                event_id
                for key, event_id in current_state_ids.items()
                if key[0] == EventTypes.SpaceChild
            ]
        )

        # filter out any events without a "via" (which implies it has been redacted),
        # and order to ensure we return stable results.
        return sorted(filter(_has_valid_via, events), key=_child_events_comparison_key)


@attr.s(frozen=True, slots=True)
class _RoomQueueEntry:
    room_id = attr.ib(type=str)
    via = attr.ib(type=Sequence[str])


def _has_valid_via(e: EventBase) -> bool:
    via = e.content.get("via")
    if not via or not isinstance(via, Sequence):
        return False
    for v in via:
        if not isinstance(v, str):
            logger.debug("Ignoring edge event %s with invalid via entry", e.event_id)
            return False
    return True


def _is_suggested_child_event(edge_event: EventBase) -> bool:
    suggested = edge_event.content.get("suggested")
    if isinstance(suggested, bool) and suggested:
        return True
    logger.debug("Ignorning not-suggested child %s", edge_event.state_key)
    return False


# Order may only contain characters in the range of \x20 (space) to \x7E (~) inclusive.
_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7E]")


def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]:
    """
    Generate a value for comparing two child events for ordering.

    The rules for ordering are supposed to be:

    1. The 'order' key, if it is valid.
    2. The 'origin_server_ts' of the 'm.room.create' event.
    3. The 'room_id'.

    But we skip step 2 since we may not have any state from the room.

    Args:
        child: The event for generating a comparison key.

    Returns:
        The comparison key as a tuple of:
            False if the ordering is valid.
            The ordering field.
            The room ID.
    """
    order = child.content.get("order")
    # If order is not a string or doesn't meet the requirements, ignore it.
    if not isinstance(order, str):
        order = None
    elif len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order):
        order = None

    # Items without an order come last.
    return (order is None, order, child.room_id)

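How _child_events_comparison_key orders children, as a standalone check: children with a valid 'order' string sort first (lexicographically), everything else falls back to room ID, because False < True when tuples are compared element-wise:

keys = [
    (False, "aaa", "!y:example.org"),  # valid order "aaa"
    (False, "bbb", "!x:example.org"),  # valid order "bbb"
    (True, None, "!a:example.org"),    # no usable order: sorts last
]
assert sorted(keys) == keys  # already in the order the handler would emit
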
synapse/handlers/sync.py

@@ -269,14 +269,22 @@ class SyncHandler:
         self.presence_handler = hs.get_presence_handler()
         self.event_sources = hs.get_event_sources()
         self.clock = hs.get_clock()
-        self.response_cache: ResponseCache[SyncRequestKey] = ResponseCache(
-            hs.get_clock(), "sync"
-        )
         self.state = hs.get_state_handler()
         self.auth = hs.get_auth()
         self.storage = hs.get_storage()
         self.state_store = self.storage.state

+        # TODO: flush cache entries on subsequent sync request.
+        #    Once we get the next /sync request (ie, one with the same access token
+        #    that sets 'since' to 'next_batch'), we know that device won't need a
+        #    cached result any more, and we could flush the entry from the cache to save
+        #    memory.
+        self.response_cache: ResponseCache[SyncRequestKey] = ResponseCache(
+            hs.get_clock(),
+            "sync",
+            timeout_ms=hs.config.caches.sync_response_cache_duration,
+        )

         # ExpiringCache((User, Device)) -> LruCache(user_id => event_id)
         self.lazy_loaded_members_cache: ExpiringCache[
             Tuple[str, Optional[str]], LruCache[str, str]

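The point of giving the sync ResponseCache a nonzero timeout is that an identical /sync arriving within the configured window (sync_response_cache_duration, in milliseconds) reuses the cached result instead of recomputing it. A sketch of the general ResponseCache pattern (the key and callback names here are invented):

# concurrent, and now briefly subsequent, callers with the same key share
# a single execution of the callback
result = await self.response_cache.wrap(request_key, compute_sync_response)
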
synapse/handlers/typing.py

@@ -335,7 +335,8 @@ class TypingWriterHandler(FollowerTypingHandler):
             )
             if not is_in_room:
                 logger.info(
-                    "Ignoring typing update from %s as we're not in the room",
+                    "Ignoring typing update for room %r from server %s as we're not in the room",
+                    room_id,
                     origin,
                 )
                 return
