#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2020 Sorunome
# Copyright 2016-2020 The Matrix.org Foundation C.I.C.
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#
import abc
import logging
import random
from http import HTTPStatus
from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple

from synapse import types
from synapse.api.constants import (
    AccountDataTypes,
    EventContentFields,
    EventTypes,
    GuestAccess,
    Membership,
)
from synapse.api.errors import (
    AuthError,
    Codes,
    PartialStateConflictError,
    ShadowBanError,
    SynapseError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.event_auth import get_named_level, get_power_level_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.handlers.pagination import PURGE_ROOM_ACTION_NAME
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging import opentracing
from synapse.metrics import event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.types import (
    JsonDict,
    Requester,
    RoomAlias,
    RoomID,
    StateMap,
    UserID,
    create_requester,
    get_domain_from_id,
)
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_left_room

if TYPE_CHECKING:
    from synapse.server import HomeServer


logger = logging.getLogger(__name__)


class NoKnownServersError(SynapseError):
    """Raised when no server already resident in the room was provided to the
    join/knock operation."""

    def __init__(self, msg: str = "No known servers"):
        super().__init__(404, msg)


class RoomMemberHandler(metaclass=abc.ABCMeta):
    # TODO(paul): This handler currently contains a messy conflation of
    # low-level API that works on UserID objects and so on, and REST-level
    # API that takes ID strings and returns pagination chunks. These concerns
    # ought to be separated out a lot better.

    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.store = hs.get_datastores().main
        self._storage_controllers = hs.get_storage_controllers()
        self.auth = hs.get_auth()
        self.state_handler = hs.get_state_handler()
        self.config = hs.config
        self._server_name = hs.hostname

        self.federation_handler = hs.get_federation_handler()
        self.directory_handler = hs.get_directory_handler()
        self.identity_handler = hs.get_identity_handler()
        self.registration_handler = hs.get_registration_handler()
        self.profile_handler = hs.get_profile_handler()
        self.event_creation_handler = hs.get_event_creation_handler()
        self.account_data_handler = hs.get_account_data_handler()
        self.event_auth_handler = hs.get_event_auth_handler()
        self._worker_lock_handler = hs.get_worker_locks_handler()

        self.member_linearizer: Linearizer = Linearizer(name="member")
        self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter")

        self.clock = hs.get_clock()
        self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
        self._third_party_event_rules = (
            hs.get_module_api_callbacks().third_party_event_rules
        )
        self._server_notices_mxid = self.config.servernotices.server_notices_mxid
        self._enable_lookup = hs.config.registration.enable_3pid_lookup
        self.allow_per_room_profiles = self.config.server.allow_per_room_profiles

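        # Each limiter below is a generic `Ratelimiter` parameterised by one of
        # the `rc_*` ratelimiting blocks in the homeserver config (broadly, a
        # sustained `per_second` rate plus a `burst_count`).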
        self._join_rate_limiter_local = Ratelimiter(
            store=self.store,
            clock=self.clock,
            cfg=hs.config.ratelimiting.rc_joins_local,
        )
        # Tracks joins from local users to rooms this server isn't a member of.
        # I.e. joins this server makes by requesting /make_join /send_join from
        # another server.
        self._join_rate_limiter_remote = Ratelimiter(
            store=self.store,
            clock=self.clock,
            cfg=hs.config.ratelimiting.rc_joins_remote,
        )
        # TODO: find a better place to keep this Ratelimiter.
        #   It needs to be
        #   - written to by event persistence code
        #   - written to by something which can snoop on replication streams
        #   - read by the RoomMemberHandler to rate limit joins from local users
        #   - read by the FederationServer to rate limit make_joins and send_joins from
        #     other homeservers
        #   I wonder if a homeserver-wide collection of rate limiters might be cleaner?
        self._join_rate_per_room_limiter = Ratelimiter(
            store=self.store,
            clock=self.clock,
            cfg=hs.config.ratelimiting.rc_joins_per_room,
        )

        # Ratelimiter for invites, keyed by room (across all issuers, all
        # recipients).
        self._invites_per_room_limiter = Ratelimiter(
            store=self.store,
            clock=self.clock,
            cfg=hs.config.ratelimiting.rc_invites_per_room,
        )

        # Ratelimiter for invites, keyed by recipient (across all rooms, all
        # issuers).
        self._invites_per_recipient_limiter = Ratelimiter(
            store=self.store,
            clock=self.clock,
            cfg=hs.config.ratelimiting.rc_invites_per_user,
        )

        # Ratelimiter for invites, keyed by issuer (across all rooms, all
        # recipients).
        self._invites_per_issuer_limiter = Ratelimiter(
            store=self.store,
            clock=self.clock,
            cfg=hs.config.ratelimiting.rc_invites_per_issuer,
        )

        self._third_party_invite_limiter = Ratelimiter(
            store=self.store,
            clock=self.clock,
            cfg=hs.config.ratelimiting.rc_third_party_invite,
        )

        self.request_ratelimiter = hs.get_request_ratelimiter()
        hs.get_notifier().add_new_join_in_room_callback(self._on_user_joined_room)

        self._forgotten_room_retention_period = (
            hs.config.server.forgotten_room_retention_period
        )

    def _on_user_joined_room(self, event_id: str, room_id: str) -> None:
        """Notify the rate limiter that a room join has occurred.

        Use this to inform the RoomMemberHandler about joins that have either
        - taken place on another homeserver, or
        - been performed by another worker in this homeserver.

        Joins actioned by this worker should use the usual `ratelimit` method, which
        checks the limit and increments the counter in one go.
        """
        self._join_rate_per_room_limiter.record_action(requester=None, key=room_id)

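    # Illustrative contrast between the two entry points (a sketch, not called
    # anywhere): a join actioned by this worker checks the limit and counts the
    # action in one call, while a join observed elsewhere is only recorded:
    #
    #     await self._join_rate_per_room_limiter.ratelimit(requester, key=room_id)
    #     self._join_rate_per_room_limiter.record_action(requester=None, key=room_id)
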
    @abc.abstractmethod
    async def _remote_join(
        self,
        requester: Requester,
        remote_room_hosts: List[str],
        room_id: str,
        user: UserID,
        content: dict,
    ) -> Tuple[str, int]:
        """Try and join a room that this server is not in

        Args:
            requester: The user making the request, according to the access token.
            remote_room_hosts: Servers that we can try to join via.
            room_id: Room that we are trying to join.
            user: User who is trying to join.
            content: A dict that should be used as the content of the join event.

        Returns:
            A tuple containing (event_id, stream_id of the join event).

        Raises:
            NoKnownServersError: if remote_room_hosts does not contain a server joined to
                the room.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    async def remote_knock(
        self,
        requester: Requester,
        remote_room_hosts: List[str],
        room_id: str,
        user: UserID,
        content: dict,
    ) -> Tuple[str, int]:
        """Try and knock on a room that this server is not in

        Args:
            requester: The user making the request, according to the access token.
            remote_room_hosts: Servers that we can try to knock via.
            room_id: Room that we are trying to knock on.
            user: User who is trying to knock.
            content: A dict that should be used as the content of the knock event.

        Returns:
            A tuple containing (event_id, stream_id of the knock event).
        """
        raise NotImplementedError()

    @abc.abstractmethod
    async def remote_reject_invite(
        self,
        invite_event_id: str,
        txn_id: Optional[str],
        requester: Requester,
        content: JsonDict,
    ) -> Tuple[str, int]:
        """
        Rejects an out-of-band invite we have received from a remote server

        Args:
            invite_event_id: ID of the invite to be rejected
            txn_id: optional transaction ID supplied by the client
            requester: user making the rejection request, according to the access token
            content: additional content to include in the rejection event.
                Normally an empty dict.

        Returns:
            event id, stream_id of the leave event
        """
        raise NotImplementedError()

    @abc.abstractmethod
    async def remote_rescind_knock(
        self,
        knock_event_id: str,
        txn_id: Optional[str],
        requester: Requester,
        content: JsonDict,
    ) -> Tuple[str, int]:
        """Rescind a local knock made on a remote room.

        Args:
            knock_event_id: The ID of the knock event to rescind.
            txn_id: An optional transaction ID supplied by the client.
            requester: The user making the request, according to the access token.
            content: The content of the generated leave event.

        Returns:
            A tuple containing (event_id, stream_id of the leave event).
        """
        raise NotImplementedError()

    @abc.abstractmethod
    async def _user_left_room(self, target: UserID, room_id: str) -> None:
        """Notifies distributor on master process that the user has left the
        room.

        Args:
            target: The user who left the room.
            room_id: The room the user left.
        """
        raise NotImplementedError()

    async def forget(
        self, user: UserID, room_id: str, do_not_schedule_purge: bool = False
    ) -> None:
        user_id = user.to_string()

        member = await self._storage_controllers.state.get_current_state_event(
            room_id=room_id, event_type=EventTypes.Member, state_key=user_id
        )
        membership = member.membership if member else None

        if membership is not None and membership not in [
            Membership.LEAVE,
            Membership.BAN,
        ]:
            raise SynapseError(400, "User %s in room %s" % (user_id, room_id))

        # In the normal case this call is only required if `membership` is not
        # `None`. But: after the last member has left the room, the background
        # update `_background_remove_left_rooms` deletes the rows relating to
        # this room from `current_state_events`, at which point
        # `get_current_state_event` returns `None`.
        await self.store.forget(user_id, room_id)

        # If everyone locally has left the room, then there is no reason for us to keep the
        # room around and we automatically purge the room after a short delay.
        if (
            not do_not_schedule_purge
            and self._forgotten_room_retention_period
            and await self.store.is_locally_forgotten_room(room_id)
        ):
            await self.hs.get_task_scheduler().schedule_task(
                PURGE_ROOM_ACTION_NAME,
                resource_id=room_id,
                timestamp=self.clock.time_msec()
                + self._forgotten_room_retention_period,
            )

    async def ratelimit_multiple_invites(
        self,
        requester: Optional[Requester],
        room_id: Optional[str],
        n_invites: int,
        update: bool = True,
    ) -> None:
        """Ratelimit more than one invite sent by the given requester in the given room.

        Args:
            requester: The requester sending the invites.
            room_id: The room the invites are being sent in.
            n_invites: The number of invites to ratelimit for.
            update: Whether to update the ratelimiter's cache.

        Raises:
            LimitExceededError: The requester can't send that many invites in the room.
        """
        await self._invites_per_room_limiter.ratelimit(
            requester,
            room_id,
            update=update,
            n_actions=n_invites,
        )

    async def ratelimit_invite(
        self,
        requester: Optional[Requester],
        room_id: Optional[str],
        invitee_user_id: str,
    ) -> None:
        """Ratelimit invites by room, by target user and by issuer.

        If the room ID is missing then we just rate limit by target user and
        issuer.
        """
        if room_id:
            await self._invites_per_room_limiter.ratelimit(requester, room_id)

        await self._invites_per_recipient_limiter.ratelimit(requester, invitee_user_id)
        if requester is not None:
            await self._invites_per_issuer_limiter.ratelimit(requester)

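    # Note that a single invite can therefore be charged against up to three
    # limiters (per-room, per-recipient, per-issuer), while
    # `ratelimit_multiple_invites` above charges the per-room limiter for
    # several invites in one call via `n_actions`.
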
    async def _local_membership_update(
        self,
        requester: Requester,
        target: UserID,
        room_id: str,
        membership: str,
        allow_no_prev_events: bool = False,
        prev_event_ids: Optional[List[str]] = None,
        state_event_ids: Optional[List[str]] = None,
        depth: Optional[int] = None,
        txn_id: Optional[str] = None,
        ratelimit: bool = True,
        content: Optional[dict] = None,
        require_consent: bool = True,
        outlier: bool = False,
        origin_server_ts: Optional[int] = None,
    ) -> Tuple[str, int]:
        """
        Internal membership update function to get an existing event or create
        and persist a new event for the new membership change.

        Args:
            requester: User requesting the membership change, i.e. the sender of the
                desired membership event.
            target: User whose membership should change, i.e. the state_key of the
                desired membership event.
            room_id: The room in which the membership is changing.
            membership: The new membership state, see synapse.api.constants.Membership.
            allow_no_prev_events: Whether to allow this event to be created with an
                empty list of prev_events. Normally this is prohibited just because
                most events should have a prev_event and we should only use this in
                special cases (previously useful for MSC2716).
            prev_event_ids: The event IDs to use as the prev events.
            state_event_ids:
                The full state at a given event. This was previously used particularly
                by the MSC2716 /batch_send endpoint. This should normally be left as
                None, which will cause the auth_event_ids to be calculated based on the
                room state at the prev_events.
            depth: Override the depth used to order the event in the DAG.
                Should normally be set to None, which will cause the depth to be calculated
                based on the prev_events.
            txn_id: The transaction ID, if given.
            ratelimit: Whether to rate limit the request.
            content: The content of the created event.
            require_consent: Whether consent is required.
            outlier: Indicates whether the event is an `outlier`, i.e. if
                it's from an arbitrary point and floating in the DAG as
                opposed to being inline with the current DAG.
            origin_server_ts: The origin_server_ts to use if a new event is created. Uses
                the current timestamp if set to None.

        Returns:
            Tuple of event ID and stream ordering position
        """
        user_id = target.to_string()

        if content is None:
            content = {}

        content["membership"] = membership
        if requester.is_guest:
            content["kind"] = "guest"

        # Check if we already have an event with a matching transaction ID. (We
        # do this check just before we persist an event as well, but may as well
        # do it up front for efficiency.)
        if txn_id:
            existing_event_id = (
                await self.event_creation_handler.get_event_id_from_transaction(
                    requester, txn_id, room_id
                )
            )
            if existing_event_id:
                event_pos = await self.store.get_position_for_event(existing_event_id)
                return existing_event_id, event_pos.stream

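        # (Illustrative note: transaction IDs give clients idempotent retries;
        # resending the same request with the same txn_id returns the original
        # event ID and stream position instead of creating a second event.)
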
        # Try several times; this could fail with PartialStateConflictError in
        # handle_new_client_event, cf. the comment in the except block.
        max_retries = 5
        for i in range(max_retries):
            try:
                (
                    event,
                    unpersisted_context,
                ) = await self.event_creation_handler.create_event(
                    requester,
                    {
                        "type": EventTypes.Member,
                        "content": content,
                        "room_id": room_id,
                        "sender": requester.user.to_string(),
                        "state_key": user_id,
                        # For backwards compatibility:
                        "membership": membership,
                        "origin_server_ts": origin_server_ts,
                    },
                    txn_id=txn_id,
                    allow_no_prev_events=allow_no_prev_events,
                    prev_event_ids=prev_event_ids,
                    state_event_ids=state_event_ids,
                    depth=depth,
                    require_consent=require_consent,
                    outlier=outlier,
                )
                context = await unpersisted_context.persist(event)
                prev_state_ids = await context.get_prev_state_ids(
                    StateFilter.from_types([(EventTypes.Member, user_id)])
                )

                prev_member_event_id = prev_state_ids.get(
                    (EventTypes.Member, user_id), None
                )

                with opentracing.start_active_span("handle_new_client_event"):
                    result_event = (
                        await self.event_creation_handler.handle_new_client_event(
                            requester,
                            events_and_context=[(event, context)],
                            extra_users=[target],
                            ratelimit=ratelimit,
                        )
                    )

                if event.membership == Membership.LEAVE:
                    if prev_member_event_id:
                        prev_member_event = await self.store.get_event(
                            prev_member_event_id
                        )
                        if prev_member_event.membership == Membership.JOIN:
                            await self._user_left_room(target, room_id)

                break
            except PartialStateConflictError as e:
                # Persisting couldn't happen because the room got un-partial stated
                # in the meantime and the context needs to be recomputed, so let's do so.
                if i == max_retries - 1:
                    raise e

        # we know it was persisted, so should have a stream ordering
        assert result_event.internal_metadata.stream_ordering
        return result_event.event_id, result_event.internal_metadata.stream_ordering

    async def copy_room_tags_and_direct_to_room(
        self, old_room_id: str, new_room_id: str, user_id: str
    ) -> None:
        """Copies the tags and direct room state from one room to another.

        Args:
            old_room_id: The room ID of the old room.
            new_room_id: The room ID of the new room.
            user_id: The user's ID.
        """
        # Retrieve user account data for predecessor room
        user_account_data = await self.store.get_global_account_data_for_user(user_id)

        # Copy direct message state if applicable
        direct_rooms = user_account_data.get(AccountDataTypes.DIRECT, {})
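        # (For reference: `m.direct` account data maps each counterparty user
        # ID to a list of room IDs, e.g. {"@friend:example.org": ["!a:example.org"]},
        # hence scanning each key's room ID list below.)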
        # Check which key this room is under
        if isinstance(direct_rooms, dict):
            for key, room_id_list in direct_rooms.items():
                if old_room_id in room_id_list and new_room_id not in room_id_list:
                    # Add new room_id to this key
                    direct_rooms[key].append(new_room_id)

                    # Save back to user's m.direct account data
                    await self.account_data_handler.add_account_data_for_user(
                        user_id, AccountDataTypes.DIRECT, direct_rooms
                    )
                    break

        # Copy room tags if applicable
        room_tags = await self.store.get_tags_for_room(user_id, old_room_id)

        # Copy each room tag to the new room
        for tag, tag_content in room_tags.items():
            await self.account_data_handler.add_tag_to_room(
                user_id, new_room_id, tag, tag_content
            )

    async def update_membership(
        self,
        requester: Requester,
        target: UserID,
        room_id: str,
        action: str,
        txn_id: Optional[str] = None,
        remote_room_hosts: Optional[List[str]] = None,
        third_party_signed: Optional[dict] = None,
        ratelimit: bool = True,
        content: Optional[dict] = None,
        new_room: bool = False,
        require_consent: bool = True,
        outlier: bool = False,
        allow_no_prev_events: bool = False,
        prev_event_ids: Optional[List[str]] = None,
        state_event_ids: Optional[List[str]] = None,
        depth: Optional[int] = None,
        origin_server_ts: Optional[int] = None,
    ) -> Tuple[str, int]:
        """Update a user's membership in a room.

        Args:
            requester: The user who is performing the update.
            target: The user whose membership is being updated.
            room_id: The room ID whose membership is being updated.
            action: The membership change, see synapse.api.constants.Membership.
            txn_id: The transaction ID, if given.
            remote_room_hosts: Remote servers to send the update to.
            third_party_signed: Information from a 3PID invite.
            ratelimit: Whether to rate limit the request.
            content: The content of the created event.
            new_room: Whether the membership update is happening in the context of a room
                creation.
            require_consent: Whether consent is required.
            outlier: Indicates whether the event is an `outlier`, i.e. if
                it's from an arbitrary point and floating in the DAG as
                opposed to being inline with the current DAG.
            allow_no_prev_events: Whether to allow this event to be created with an
                empty list of prev_events. Normally this is prohibited just because
                most events should have a prev_event and we should only use this in
                special cases (previously useful for MSC2716).
            prev_event_ids: The event IDs to use as the prev events.
            state_event_ids:
                The full state at a given event. This was previously used particularly
                by the MSC2716 /batch_send endpoint. This should normally be left as
                None, which will cause the auth_event_ids to be calculated based on the
                room state at the prev_events.
            depth: Override the depth used to order the event in the DAG.
                Should normally be set to None, which will cause the depth to be calculated
                based on the prev_events.
            origin_server_ts: The origin_server_ts to use if a new event is created. Uses
                the current timestamp if set to None.

        Returns:
            A tuple of the new event ID and stream ID.

        Raises:
            ShadowBanError if a shadow-banned requester attempts to send an invite.
        """
        if ratelimit:
            if action == Membership.JOIN:
                # Only rate-limit if the user isn't already joined to the room, otherwise
                # we'll end up blocking profile updates.
                (
                    current_membership,
                    _,
                ) = await self.store.get_local_current_membership_for_user_in_room(
                    requester.user.to_string(),
                    room_id,
                )
                if current_membership != Membership.JOIN:
                    await self._join_rate_limiter_local.ratelimit(requester)
                    await self._join_rate_per_room_limiter.ratelimit(
                        requester, key=room_id, update=False
                    )
            elif action == Membership.INVITE:
                await self.ratelimit_invite(requester, room_id, target.to_string())

        if action == Membership.INVITE and requester.shadow_banned:
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(random.randint(1, 10))
            raise ShadowBanError()

        key = (room_id,)

        as_id = object()
        if requester.app_service:
            as_id = requester.app_service.id

        # We first linearise by the application service (to try to limit concurrent joins
        # by application services), and then by room ID.
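        # We also take a read lock against room purges below, so that the
        # membership event cannot be created while the room is being purged
        # out from under us.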
        async with self.member_as_limiter.queue(as_id):
            async with self.member_linearizer.queue(key):
                async with self._worker_lock_handler.acquire_read_write_lock(
                    NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
                ):
                    with opentracing.start_active_span("update_membership_locked"):
                        result = await self.update_membership_locked(
                            requester,
                            target,
                            room_id,
                            action,
                            txn_id=txn_id,
                            remote_room_hosts=remote_room_hosts,
                            third_party_signed=third_party_signed,
                            ratelimit=ratelimit,
                            content=content,
                            new_room=new_room,
                            require_consent=require_consent,
                            outlier=outlier,
                            allow_no_prev_events=allow_no_prev_events,
                            prev_event_ids=prev_event_ids,
                            state_event_ids=state_event_ids,
                            depth=depth,
                            origin_server_ts=origin_server_ts,
                        )

        return result

2020-10-29 07:48:39 -04:00
|
|
|
async def update_membership_locked(
|
2016-04-06 10:44:22 -04:00
|
|
|
self,
|
2020-05-15 15:05:25 -04:00
|
|
|
requester: Requester,
|
|
|
|
target: UserID,
|
|
|
|
room_id: str,
|
|
|
|
action: str,
|
|
|
|
txn_id: Optional[str] = None,
|
|
|
|
remote_room_hosts: Optional[List[str]] = None,
|
|
|
|
third_party_signed: Optional[dict] = None,
|
|
|
|
ratelimit: bool = True,
|
|
|
|
content: Optional[dict] = None,
|
2021-10-06 10:32:16 -04:00
|
|
|
new_room: bool = False,
|
2020-05-15 15:05:25 -04:00
|
|
|
require_consent: bool = True,
|
2021-06-22 05:02:53 -04:00
|
|
|
outlier: bool = False,
|
        allow_no_prev_events: bool = False,
        prev_event_ids: Optional[List[str]] = None,
        state_event_ids: Optional[List[str]] = None,
        depth: Optional[int] = None,
        origin_server_ts: Optional[int] = None,
    ) -> Tuple[str, int]:
        """Helper for update_membership.

        Assumes that the membership linearizer is already held for the room.

        Args:
            requester: The user performing the membership change.
            target: The user whose membership is being changed.
            room_id: The room in which to change the membership.
            action: The requested membership change ("join", "leave", "kick", etc).
            txn_id: The transaction ID, if any, used to deduplicate the request.
            remote_room_hosts: Remote servers to try to join via, if a remote
                join is required.
            third_party_signed: Signed data from a third-party invite, if this
                join follows one.
            ratelimit: Whether to rate limit the request.
            content: The content of the membership event, if any.
            new_room: Whether the membership update is happening in the context of a room
                creation.
            require_consent: Whether consent is required.
            outlier: Indicates whether the event is an `outlier`, i.e. if
                it's from an arbitrary point and floating in the DAG as
                opposed to being inline with the current DAG.
            allow_no_prev_events: Whether to allow this event to be created with an
                empty list of prev_events. Normally this is prohibited just because
                most events should have a prev_event and we should only use this in
                special cases (previously useful for MSC2716).
            prev_event_ids: The event IDs to use as the prev events.
            state_event_ids:
                The full state at a given event. This was previously used particularly
                by the MSC2716 /batch_send endpoint. This should normally be left as
                None, which will cause the auth_event_ids to be calculated based on the
                room state at the prev_events.
            depth: Override the depth used to order the event in the DAG.
                Should normally be set to None, which will cause the depth to be
                calculated based on the prev_events.
            origin_server_ts: The origin_server_ts to use if a new event is created.
                Uses the current timestamp if set to None.

        Returns:
            A tuple of the new event ID and stream ID.
        """
        content_specified = bool(content)
        if content is None:
            content = {}
        else:
            # We do a copy here as we potentially change some keys
            # later on.
            content = dict(content)
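        # (`content_specified` is consulted later: on a remote join the profile
        # fields are only auto-populated when the caller supplied no content.)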

        # allow the server notices mxid to set room-level profile
        is_requester_server_notices_user = (
            self._server_notices_mxid is not None
            and requester.user.to_string() == self._server_notices_mxid
        )

        if (
            not self.allow_per_room_profiles and not is_requester_server_notices_user
        ) or requester.shadow_banned:
            # Strip profile data, knowing that new profile data will be added to the
            # event's content in event_creation_handler.create_event() using the
            # target's global profile.
            content.pop("displayname", None)
            content.pop("avatar_url", None)

        if len(content.get("displayname") or "") > MAX_DISPLAYNAME_LEN:
            raise SynapseError(
                400,
                f"Displayname is too long (max {MAX_DISPLAYNAME_LEN})",
                errcode=Codes.BAD_JSON,
            )

        if len(content.get("avatar_url") or "") > MAX_AVATAR_URL_LEN:
            raise SynapseError(
                400,
                f"Avatar URL is too long (max {MAX_AVATAR_URL_LEN})",
                errcode=Codes.BAD_JSON,
            )

        if "avatar_url" in content and content.get("avatar_url") is not None:
            if not await self.profile_handler.check_avatar_size_and_mime_type(
                content["avatar_url"],
            ):
                raise SynapseError(403, "This avatar is not allowed", Codes.FORBIDDEN)

        # The event content should *not* include the authorising user as
        # it won't be properly signed. Strip it out since it might come
        # back from a client updating a display name / avatar.
        #
        # This only applies to restricted rooms, but there should be no reason
        # for a client to include it. Unconditionally remove it.
        content.pop(EventContentFields.AUTHORISING_USER, None)

        effective_membership_state = action
        if action in ["kick", "unban"]:
            effective_membership_state = "leave"
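        # (Both "kick" and "unban" are expressed as "leave" membership events in
        # room state; the original `action` is still consulted below, e.g. to
        # check that an "unban" only applies to a banned user.)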

        # if this is a join with a 3pid signature, we may need to turn a 3pid
        # invite into a normal invite before we can handle the join.
        if third_party_signed is not None:
            await self.federation_handler.exchange_third_party_invite(
                third_party_signed["sender"],
                target.to_string(),
                room_id,
                third_party_signed,
            )

        if not remote_room_hosts:
            remote_room_hosts = []

        if effective_membership_state not in ("leave", "ban"):
            is_blocked = await self.store.is_room_blocked(room_id)
            if is_blocked:
                raise SynapseError(403, "This room has been blocked on this server")

        if effective_membership_state == Membership.INVITE:
            target_id = target.to_string()

            # block any attempts to invite the server notices mxid
            if target_id == self._server_notices_mxid:
                raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")

            block_invite_result = None

            if (
                self._server_notices_mxid is not None
                and requester.user.to_string() == self._server_notices_mxid
            ):
                # allow the server notices mxid to send invites
                is_requester_admin = True
            else:
                is_requester_admin = await self.auth.is_server_admin(requester)

            if not is_requester_admin:
                if self.config.server.block_non_admin_invites:
                    logger.info(
                        "Blocking invite: user is not admin and non-admin "
                        "invites disabled"
                    )
                    block_invite_result = (Codes.FORBIDDEN, {})

                spam_check = await self._spam_checker_module_callbacks.user_may_invite(
                    requester.user.to_string(), target_id, room_id
                )
                if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
                    logger.info("Blocking invite due to spam checker")
                    block_invite_result = spam_check

            if block_invite_result is not None:
                raise SynapseError(
                    403,
                    "Invites have been disabled on this server",
                    errcode=block_invite_result[0],
                    additional_fields=block_invite_result[1],
                )
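        # (Spam-checker callback results are normalised to either NOT_SPAM or an
        # (error code, extra fields) tuple, which is why the block result above
        # can be passed straight into SynapseError.)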

        # An empty prev_events list is allowed as long as the auth_event_ids are present
        if prev_event_ids is not None:
            return await self._local_membership_update(
                requester=requester,
                target=target,
                room_id=room_id,
                membership=effective_membership_state,
                txn_id=txn_id,
                ratelimit=ratelimit,
                allow_no_prev_events=allow_no_prev_events,
                prev_event_ids=prev_event_ids,
                state_event_ids=state_event_ids,
                depth=depth,
                content=content,
                require_consent=require_consent,
                outlier=outlier,
                origin_server_ts=origin_server_ts,
            )

        latest_event_ids = await self.store.get_prev_events_for_room(room_id)

        is_partial_state_room = await self.store.is_partial_state_room(room_id)
        partial_state_before_join = await self.state_handler.compute_state_after_events(
            room_id, latest_event_ids, await_full_state=False
        )
        # `is_partial_state_room` also indicates whether `partial_state_before_join` is
        # partial.

        is_host_in_room = await self._is_host_in_room(partial_state_before_join)

        # if we are not in the room, we won't have the current state
        if is_host_in_room:
            # TODO: Refactor into dictionary of explicitly allowed transitions
            # between old and new state, with specific error messages for some
            # transitions and generic otherwise
            old_state_id = partial_state_before_join.get(
                (EventTypes.Member, target.to_string())
            )

            if old_state_id:
                old_state = await self.store.get_event(old_state_id, allow_none=True)
                old_membership = (
                    old_state.content.get("membership") if old_state else None
                )
                if action == "unban" and old_membership != "ban":
                    raise SynapseError(
                        403,
                        "Cannot unban user who was not banned"
                        " (membership=%s)" % old_membership,
                        errcode=Codes.BAD_STATE,
                    )
                if old_membership == "ban" and action not in ["ban", "unban", "leave"]:
                    raise SynapseError(
                        403,
                        "Cannot %s user who was banned" % (action,),
                        errcode=Codes.BAD_STATE,
                    )

                if old_state:
                    same_content = content == old_state.content
                    same_membership = old_membership == effective_membership_state
                    same_sender = requester.user.to_string() == old_state.sender
                    if same_sender and same_membership and same_content:
                        # duplicate event.
                        # we know it was persisted, so must have a stream ordering.
                        assert old_state.internal_metadata.stream_ordering
                        return (
                            old_state.event_id,
                            old_state.internal_metadata.stream_ordering,
                        )

                if old_membership in ["ban", "leave"] and action == "kick":
                    raise AuthError(403, "The target user is not in the room")

                # we don't allow people to reject invites to the server notice
                # room, but they can leave it once they are joined.
                if (
                    old_membership == Membership.INVITE
                    and effective_membership_state == Membership.LEAVE
                ):
                    is_blocked = await self.store.is_server_notice_room(room_id)
                    if is_blocked:
                        raise SynapseError(
                            HTTPStatus.FORBIDDEN,
                            "You cannot reject this invite",
                            errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM,
                        )
        else:
            if action == "kick":
                raise AuthError(403, "The target user is not in the room")
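        # (The duplicate-event check above makes repeated identical membership
        # requests idempotent: we return the existing event ID and stream
        # ordering rather than creating a new event.)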

        if effective_membership_state == Membership.JOIN:
            if requester.is_guest:
                guest_can_join = await self._can_guest_join(partial_state_before_join)
                if not guest_can_join:
                    # This should be an auth check, but guests are a local concept,
                    # so don't really fit into the general auth process.
                    raise AuthError(403, "Guest access not allowed")

            # Figure out whether the user is a server admin to determine whether they
            # should be able to bypass the spam checker.
            if (
                self._server_notices_mxid is not None
                and requester.user.to_string() == self._server_notices_mxid
            ):
                # allow the server notices mxid to join rooms
                bypass_spam_checker = True
            else:
                bypass_spam_checker = await self.auth.is_server_admin(requester)

            inviter = await self._get_inviter(target.to_string(), room_id)
            if (
                not bypass_spam_checker
                # We assume that if the spam checker allowed the user to create
                # a room then they're allowed to join it.
                and not new_room
            ):
                spam_check = (
                    await self._spam_checker_module_callbacks.user_may_join_room(
                        target.to_string(), room_id, is_invited=inviter is not None
                    )
                )
                if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
                    raise SynapseError(
                        403,
                        "Not allowed to join this room",
                        errcode=spam_check[0],
                        additional_fields=spam_check[1],
                    )

            # Check if a remote join should be performed.
            remote_join, remote_room_hosts = await self._should_perform_remote_join(
                target.to_string(),
                room_id,
                remote_room_hosts,
                content,
                is_partial_state_room,
                is_host_in_room,
                partial_state_before_join,
            )
            if remote_join:
                if ratelimit:
                    await self._join_rate_limiter_remote.ratelimit(
                        requester,
                    )
                    await self._join_rate_per_room_limiter.ratelimit(
                        requester,
                        key=room_id,
                        update=False,
                    )

                inviter = await self._get_inviter(target.to_string(), room_id)
                if inviter and not self.hs.is_mine(inviter):
                    remote_room_hosts.append(inviter.domain)

                content["membership"] = Membership.JOIN

                try:
                    profile = self.profile_handler
                    if not content_specified:
                        content["displayname"] = await profile.get_displayname(target)
                        content["avatar_url"] = await profile.get_avatar_url(target)
                except Exception as e:
                    logger.info(
                        "Failed to get profile information while processing remote join for %r: %s",
                        target,
                        e,
                    )

                if requester.is_guest:
                    content["kind"] = "guest"

                remote_join_response = await self._remote_join(
                    requester, remote_room_hosts, room_id, target, content
                )

                return remote_join_response

        elif effective_membership_state == Membership.LEAVE:
            if not is_host_in_room:
                # Figure out the user's current membership state for the room
                (
                    current_membership_type,
                    current_membership_event_id,
                ) = await self.store.get_local_current_membership_for_user_in_room(
                    target.to_string(), room_id
                )
                if not current_membership_type or not current_membership_event_id:
                    logger.info(
                        "%s sent a leave request to %s, but that is not an active room "
                        "on this server, or there is no pending invite or knock",
                        target,
                        room_id,
                    )

                    raise SynapseError(404, "Not a known room")

                # perhaps we've been invited
                if current_membership_type == Membership.INVITE:
                    invite = await self.store.get_event(current_membership_event_id)
                    logger.info(
                        "%s rejects invite to %s from %s",
                        target,
                        room_id,
                        invite.sender,
                    )

                    if not self.hs.is_mine_id(invite.sender):
                        # send the rejection to the inviter's HS (with fallback to
                        # local event)
                        return await self.remote_reject_invite(
                            invite.event_id,
                            txn_id,
                            requester,
                            content,
                        )

                    # the inviter was on our server, but has now left. Carry on
                    # with the normal rejection codepath, which will also send the
                    # rejection out to any other servers we believe are still in the room.

                    # thanks to overzealous cleaning up of event_forward_extremities in
                    # `delete_old_current_state_events`, it's possible to end up with no
                    # forward extremities here. If that happens, let's just hang the
                    # rejection off the invite event.
                    #
                    # see: https://github.com/matrix-org/synapse/issues/7139
                    if len(latest_event_ids) == 0:
                        latest_event_ids = [invite.event_id]

                # or perhaps this is a remote room that a local user has knocked on
                elif current_membership_type == Membership.KNOCK:
                    knock = await self.store.get_event(current_membership_event_id)
                    return await self.remote_rescind_knock(
                        knock.event_id, txn_id, requester, content
                    )

        elif effective_membership_state == Membership.KNOCK:
            if not is_host_in_room:
                # The knock needs to be sent over federation instead
                remote_room_hosts.append(get_domain_from_id(room_id))

                content["membership"] = Membership.KNOCK

                try:
                    profile = self.profile_handler
                    if "displayname" not in content:
                        content["displayname"] = await profile.get_displayname(target)
                    if "avatar_url" not in content:
                        content["avatar_url"] = await profile.get_avatar_url(target)
                except Exception as e:
                    logger.info(
                        "Failed to get profile information while processing remote knock for %r: %s",
                        target,
                        e,
                    )

                return await self.remote_knock(
                    requester, remote_room_hosts, room_id, target, content
                )

        return await self._local_membership_update(
            requester=requester,
            target=target,
            room_id=room_id,
            membership=effective_membership_state,
            txn_id=txn_id,
            ratelimit=ratelimit,
            prev_event_ids=latest_event_ids,
            state_event_ids=state_event_ids,
            depth=depth,
            content=content,
            require_consent=require_consent,
            outlier=outlier,
            origin_server_ts=origin_server_ts,
        )

    async def _should_perform_remote_join(
        self,
        user_id: str,
        room_id: str,
        remote_room_hosts: List[str],
        content: JsonDict,
        is_partial_state_room: bool,
        is_host_in_room: bool,
        partial_state_before_join: StateMap[str],
    ) -> Tuple[bool, List[str]]:
        """
        Check whether the server should do a remote join (as opposed to a local
        join) for a user.

        Generally a remote join is used if:

        * The server is not yet in the room.
        * The server is in the room, the room has restricted join rules, the user
          is not joined or invited to the room, and the server does not have
          another user who is capable of issuing invites.

        Args:
            user_id: The user joining the room.
            room_id: The room being joined.
            remote_room_hosts: A list of remote room hosts.
            content: The content to use as the event body of the join. This may
                be modified.
            is_partial_state_room: `True` if the server currently doesn't hold the full
                state of the room.
            is_host_in_room: `True` if the host is in the room.
            partial_state_before_join: The state before the join event (i.e. the
                resolution of the states after its parent events). May be full or
                partial state, depending on `is_partial_state_room`.

        Returns:
            A tuple of:
                True if a remote join should be performed. False if the join can be
                done locally.

                A list of remote room hosts to use. This is an empty list if a
                local join is to be done.
        """
        # If the host isn't in the room, pass through the prospective hosts.
        if not is_host_in_room:
            return True, remote_room_hosts

        prev_member_event_id = partial_state_before_join.get(
            (EventTypes.Member, user_id), None
        )
        previous_membership = None
        if prev_member_event_id:
            prev_member_event = await self.store.get_event(prev_member_event_id)
            previous_membership = prev_member_event.membership

        # If we are not fully joined yet, and the target is not already in the room,
        # let's do a remote join so another server with the full state can validate
        # that the user has not been banned, for example.
        # We could just accept the join and wait for state res to resolve that later on
        # but we would then leak room history to this person until then, which is pretty
        # bad.
        if is_partial_state_room and previous_membership != Membership.JOIN:
            return True, remote_room_hosts

        # If the host is in the room, but not one of the authorised hosts
        # for restricted join rules, a remote join must be used.
        room_version = await self.store.get_room_version(room_id)

        # If restricted join rules are not being used, a local join can always
        # be used.
        if not await self.event_auth_handler.has_restricted_join_rules(
            partial_state_before_join, room_version
        ):
            return False, []

        # If the user is invited to the room or already joined, the join
        # event can always be issued locally.
        if previous_membership in (Membership.JOIN, Membership.INVITE):
            return False, []

        # All the partial state cases are covered above. We have been given the full
        # state of the room.
        assert not is_partial_state_room
        state_before_join = partial_state_before_join

        # If the local host has a user who can issue invites, then a local
        # join can be done.
        #
        # If not, generate a new list of remote hosts based on which
        # can issue invites.
        event_map = await self.store.get_events(state_before_join.values())
        current_state = {
            state_key: event_map[event_id]
            for state_key, event_id in state_before_join.items()
        }
        allowed_servers = get_servers_from_users(
            get_users_which_can_issue_invite(current_state)
        )

        # If the local server is not one of the allowed servers, then a remote
        # join must be done. Return the list of prospective servers based on
        # which can issue invites.
        if self.hs.hostname not in allowed_servers:
            return True, list(allowed_servers)

        # Ensure the member is allowed access via membership in a room.
        await self.event_auth_handler.check_restricted_join_rules(
            state_before_join, room_version, user_id, previous_membership
        )

        # If this is going to be a local join, additional information must
        # be included in the event content in order to efficiently validate
        # the event.
        content[EventContentFields.AUTHORISING_USER] = (
            await self.event_auth_handler.get_user_which_could_invite(
                room_id,
                state_before_join,
            )
        )

        return False, []

    async def transfer_room_state_on_room_upgrade(
        self, old_room_id: str, room_id: str
    ) -> None:
        """Upon our server becoming aware of an upgraded room, either by upgrading a room
        ourselves or joining one, we can transfer over information from the previous room.

        Copies user state (tags/push rules) for every local user that was in the old room,
        as well as migrating the room directory state.

        Args:
            old_room_id: The ID of the old room
            room_id: The ID of the new room
        """
        logger.info("Transferring room state from %s to %s", old_room_id, room_id)

        # Find all local users that were in the old room and copy over each user's state
        local_users = await self.store.get_local_users_in_room(old_room_id)
        await self.copy_user_state_on_room_upgrade(old_room_id, room_id, local_users)

        # Add new room to the room directory if the old room was there
        # Remove old room from the room directory
        old_room = await self.store.get_room(old_room_id)
        # If the old room exists and is public.
        if old_room is not None and old_room[0]:
            await self.store.set_room_is_public(old_room_id, False)
            await self.store.set_room_is_public(room_id, True)

        # Transfer alias mappings in the room directory
        await self.store.update_aliases_for_room(old_room_id, room_id)

    async def copy_user_state_on_room_upgrade(
        self, old_room_id: str, new_room_id: str, user_ids: Iterable[str]
    ) -> None:
        """Copy user-specific information when users join a new room that is the
        result of a room upgrade.

        Args:
            old_room_id: The ID of the upgraded (old) room
            new_room_id: The ID of the new room
            user_ids: User IDs to copy state for
        """

        logger.debug(
            "Copying over room tags and push rules from %s to %s for users %s",
            old_room_id,
            new_room_id,
            user_ids,
        )

        for user_id in user_ids:
            try:
                # It is an upgraded room. Copy over old tags
                await self.copy_room_tags_and_direct_to_room(
                    old_room_id, new_room_id, user_id
                )
                # Copy over push rules
                await self.store.copy_push_rules_from_room_to_room_for_user(
                    old_room_id, new_room_id, user_id
                )
            except Exception:
                logger.exception(
                    "Error copying tags and/or push rules from rooms %s to %s for user %s. "
                    "Skipping...",
                    old_room_id,
                    new_room_id,
                    user_id,
                )
                continue

    async def send_membership_event(
        self,
        requester: Optional[Requester],
        event: EventBase,
        context: EventContext,
        ratelimit: bool = True,
    ) -> None:
        """
        Change the membership status of a user in a room.

        Args:
            requester: The local user who requested the membership
                event. If None, certain checks, like whether this homeserver can
                act as the sender, will be skipped.
            event: The membership event.
            context: The context of the event.
            ratelimit: Whether to rate limit this request.

        Raises:
            SynapseError if there was a problem changing the membership.
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        target_user = UserID.from_string(event.state_key)
        room_id = event.room_id

        if requester is not None:
            sender = UserID.from_string(event.sender)
            assert (
                sender == requester.user
            ), "Sender (%s) must be same as requester (%s)" % (sender, requester.user)
            assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
        else:
            requester = types.create_requester(target_user)

        prev_state_ids = await context.get_prev_state_ids(
            StateFilter.from_types([(EventTypes.GuestAccess, "")])
        )
        if event.membership == Membership.JOIN:
            if requester.is_guest:
                guest_can_join = await self._can_guest_join(prev_state_ids)
                if not guest_can_join:
                    # This should be an auth check, but guests are a local concept,
                    # so don't really fit into the general auth process.
                    raise AuthError(403, "Guest access not allowed")

        if event.membership not in (Membership.LEAVE, Membership.BAN):
            is_blocked = await self.store.is_room_blocked(room_id)
            if is_blocked:
                raise SynapseError(403, "This room has been blocked on this server")

        event = await self.event_creation_handler.handle_new_client_event(
            requester,
            events_and_context=[(event, context)],
            extra_users=[target_user],
            ratelimit=ratelimit,
        )

        if event.membership == Membership.LEAVE:
            prev_state_ids = await context.get_prev_state_ids(
                StateFilter.from_types([(EventTypes.Member, event.state_key)])
            )
            prev_member_event_id = prev_state_ids.get(
                (EventTypes.Member, event.state_key), None
            )

            if prev_member_event_id:
                prev_member_event = await self.store.get_event(prev_member_event_id)
                if prev_member_event.membership == Membership.JOIN:
                    await self._user_left_room(target_user, room_id)

    async def _can_guest_join(self, partial_current_state_ids: StateMap[str]) -> bool:
        """
        Returns whether a guest can join a room based on its current state.

        Args:
            partial_current_state_ids: The current state of the room. May be full or
                partial state.
        """
        guest_access_id = partial_current_state_ids.get(
            (EventTypes.GuestAccess, ""), None
        )
        if not guest_access_id:
            return False

        guest_access = await self.store.get_event(guest_access_id)

        return bool(
            guest_access
            and guest_access.content
            and guest_access.content.get(EventContentFields.GUEST_ACCESS)
            == GuestAccess.CAN_JOIN
        )

    async def kick_guest_users(self, current_state: Iterable[EventBase]) -> None:
        """Kick any local guest users from the room.

        This is called when the room state changes from guests allowed to not-allowed.

        Args:
            current_state: the current state of the room. We will iterate this to look
                for guest users to kick.
        """
        for member_event in current_state:
            try:
                if member_event.type != EventTypes.Member:
                    continue

                if not self.hs.is_mine_id(member_event.state_key):
                    continue

                if member_event.content["membership"] not in {
                    Membership.JOIN,
                    Membership.INVITE,
                }:
                    continue

                if (
                    "kind" not in member_event.content
                    or member_event.content["kind"] != "guest"
                ):
                    continue

                # We make the user choose to leave, rather than have the
                # event-sender kick them. This is partially because we don't
                # need to worry about power levels, and partially because guest
                # users are a concept which doesn't hugely work over federation,
                # and having homeservers have their own users leave keeps more
                # of that decision-making and control local to the guest-having
                # homeserver.
                target_user = UserID.from_string(member_event.state_key)
                requester = create_requester(
                    target_user, is_guest=True, authenticated_entity=self._server_name
                )
                handler = self.hs.get_room_member_handler()
                await handler.update_membership(
                    requester,
                    target_user,
                    member_event.room_id,
                    "leave",
                    ratelimit=False,
                    require_consent=False,
                )
            except Exception as e:
                logger.exception("Error kicking guest user: %s" % (e,))

    async def lookup_room_alias(
        self, room_alias: RoomAlias
    ) -> Tuple[RoomID, List[str]]:
        """
        Get the room ID associated with a room alias.

        Args:
            room_alias: The alias to look up.
        Returns:
            A tuple of:
                The room ID as a RoomID object.
                Hosts likely to be participating in the room ([str]).
        Raises:
            SynapseError if the room alias could not be found.
        """
        directory_handler = self.directory_handler
        mapping = await directory_handler.get_association(room_alias)

        if not mapping:
            raise SynapseError(404, "No such room alias")

        room_id = mapping["room_id"]
        servers = mapping["servers"]

        # put the server which owns the alias at the front of the server list.
        if room_alias.domain in servers:
            servers.remove(room_alias.domain)
        servers.insert(0, room_alias.domain)

        return RoomID.from_string(room_id), servers

    async def _get_inviter(self, user_id: str, room_id: str) -> Optional[UserID]:
        """Return the user who invited the given local user to the room, if
        there is a pending local invite, else None."""
        invite = await self.store.get_invite_for_local_user_in_room(
            user_id=user_id, room_id=room_id
        )
        if invite:
            return UserID.from_string(invite.sender)
        return None

    async def do_3pid_invite(
        self,
        room_id: str,
        inviter: UserID,
        medium: str,
        address: str,
        id_server: str,
        requester: Requester,
        txn_id: Optional[str],
        id_access_token: str,
        prev_event_ids: Optional[List[str]] = None,
        depth: Optional[int] = None,
    ) -> Tuple[str, int]:
        """Invite a 3PID to a room.

        Args:
            room_id: The room to invite the 3PID to.
            inviter: The user sending the invite.
            medium: The 3PID's medium.
            address: The 3PID's address.
            id_server: The identity server to use.
            requester: The user making the request.
            txn_id: The transaction ID this is part of, or None if this is not
                part of a transaction.
            id_access_token: Identity server access token.
            prev_event_ids: The event IDs to use as the prev events.
            depth: Override the depth used to order the event in the DAG.
                Should normally be set to None, which will cause the depth to be
                calculated based on the prev_events.

        Returns:
            Tuple of event ID and stream ordering position

        Raises:
            ShadowBanError if the requester has been shadow-banned.
        """
        if self.config.server.block_non_admin_invites:
            is_requester_admin = await self.auth.is_server_admin(requester)
            if not is_requester_admin:
                raise SynapseError(
                    403, "Invites have been disabled on this server", Codes.FORBIDDEN
                )

        if requester.shadow_banned:
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(random.randint(1, 10))
            raise ShadowBanError()

        # We need to rate limit *before* we send out any 3PID invites, so we
        # can't just rely on the standard ratelimiting of events.
        await self._third_party_invite_limiter.ratelimit(requester)

        can_invite = await self._third_party_event_rules.check_threepid_can_be_invited(
            medium, address, room_id
        )
        if not can_invite:
            raise SynapseError(
                403,
                "This third-party identifier can not be invited in this room",
                Codes.FORBIDDEN,
            )

        if not self._enable_lookup:
            raise SynapseError(
                403, "Looking up third-party identifiers is denied from this server"
            )

        invitee = await self.identity_handler.lookup_3pid(
            id_server, medium, address, id_access_token
        )

        if invitee:
            # Note that update_membership with an action of "invite" can raise
            # a ShadowBanError, but this was done above already.
            # We don't check the invite against the spamchecker(s) here (through
            # user_may_invite) because we'll do it further down the line anyway (in
            # update_membership_locked).
            event_id, stream_id = await self.update_membership(
                requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id
            )
        else:
            # Check if the spamchecker(s) allow this invite to go through.
            spam_check = (
                await self._spam_checker_module_callbacks.user_may_send_3pid_invite(
                    inviter_userid=requester.user.to_string(),
                    medium=medium,
                    address=address,
                    room_id=room_id,
                )
            )
            if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
                raise SynapseError(
                    403,
                    "Cannot send threepid invite",
                    errcode=spam_check[0],
                    additional_fields=spam_check[1],
                )

            event, stream_id = await self._make_and_store_3pid_invite(
                requester,
                id_server,
                medium,
                address,
                room_id,
                inviter,
                txn_id=txn_id,
                id_access_token=id_access_token,
                prev_event_ids=prev_event_ids,
                depth=depth,
            )
            event_id = event.event_id

        return event_id, stream_id

    async def _make_and_store_3pid_invite(
        self,
        requester: Requester,
        id_server: str,
        medium: str,
        address: str,
        room_id: str,
        user: UserID,
        txn_id: Optional[str],
        id_access_token: str,
        prev_event_ids: Optional[List[str]] = None,
        depth: Optional[int] = None,
    ) -> Tuple[EventBase, int]:
        room_state = await self._storage_controllers.state.get_current_state(
            room_id,
            StateFilter.from_types(
                [
                    (EventTypes.Member, user.to_string()),
                    (EventTypes.CanonicalAlias, ""),
                    (EventTypes.Name, ""),
                    (EventTypes.Create, ""),
                    (EventTypes.JoinRules, ""),
                    (EventTypes.RoomAvatar, ""),
                ]
            ),
        )

        inviter_display_name = ""
        inviter_avatar_url = ""
        member_event = room_state.get((EventTypes.Member, user.to_string()))
        if member_event:
            inviter_display_name = member_event.content.get("displayname", "")
            inviter_avatar_url = member_event.content.get("avatar_url", "")

        # if user has no display name, default to their MXID
        if not inviter_display_name:
            inviter_display_name = user.to_string()

        canonical_room_alias = ""
        canonical_alias_event = room_state.get((EventTypes.CanonicalAlias, ""))
        if canonical_alias_event:
            canonical_room_alias = canonical_alias_event.content.get("alias", "")

        room_name = ""
        room_name_event = room_state.get((EventTypes.Name, ""))
        if room_name_event:
            room_name = room_name_event.content.get("name", "")

        room_type = None
        room_create_event = room_state.get((EventTypes.Create, ""))
        if room_create_event:
            room_type = room_create_event.content.get(EventContentFields.ROOM_TYPE)

        room_join_rules = ""
        join_rules_event = room_state.get((EventTypes.JoinRules, ""))
        if join_rules_event:
            room_join_rules = join_rules_event.content.get("join_rule", "")

        room_avatar_url = ""
        room_avatar_event = room_state.get((EventTypes.RoomAvatar, ""))
        if room_avatar_event:
            room_avatar_url = room_avatar_event.content.get("url", "")

        (
            token,
            public_keys,
            fallback_public_key,
            display_name,
        ) = await self.identity_handler.ask_id_server_for_third_party_invite(
            requester=requester,
            id_server=id_server,
            medium=medium,
            address=address,
            room_id=room_id,
            inviter_user_id=user.to_string(),
            room_alias=canonical_room_alias,
            room_avatar_url=room_avatar_url,
            room_join_rules=room_join_rules,
            room_name=room_name,
            room_type=room_type,
            inviter_display_name=inviter_display_name,
            inviter_avatar_url=inviter_avatar_url,
            id_access_token=id_access_token,
        )
|
|
|
|
2020-05-22 09:21:54 -04:00
|
|
|
(
|
|
|
|
event,
|
|
|
|
stream_id,
|
|
|
|
) = await self.event_creation_handler.create_and_send_nonmember_event(
|
2016-03-31 08:08:45 -04:00
|
|
|
requester,
|
|
|
|
{
|
|
|
|
"type": EventTypes.ThirdPartyInvite,
|
|
|
|
"content": {
|
|
|
|
"display_name": display_name,
|
|
|
|
"public_keys": public_keys,
|
|
|
|
# For backwards compatibility:
|
|
|
|
"key_validity_url": fallback_public_key["key_validity_url"],
|
|
|
|
"public_key": fallback_public_key["public_key"],
|
|
|
|
},
|
|
|
|
"room_id": room_id,
|
|
|
|
"sender": user.to_string(),
|
|
|
|
"state_key": token,
|
|
|
|
},
|
2019-06-28 11:04:05 -04:00
|
|
|
ratelimit=False,
|
2016-03-31 08:08:45 -04:00
|
|
|
txn_id=txn_id,
|
2022-07-13 14:32:46 -04:00
|
|
|
prev_event_ids=prev_event_ids,
|
|
|
|
depth=depth,
|
2016-03-31 08:08:45 -04:00
|
|
|
)
|
2022-07-13 14:32:46 -04:00
|
|
|
return event, stream_id

    async def _is_host_in_room(self, partial_current_state_ids: StateMap[str]) -> bool:
        """Returns whether the homeserver is in the room based on its current state.

        Args:
            partial_current_state_ids: The current state of the room. May be full or
                partial state.
        """
        # Have we just created the room, and is this about to be the very
        # first member event?
        create_event_id = partial_current_state_ids.get(("m.room.create", ""))
        if len(partial_current_state_ids) == 1 and create_event_id:
            # We can only get here if we're in the process of creating the room
            return True

        for etype, state_key in partial_current_state_ids:
            if etype != EventTypes.Member or not self.hs.is_mine_id(state_key):
                continue

            event_id = partial_current_state_ids[(etype, state_key)]
            event = await self.store.get_event(event_id, allow_none=True)
            if not event:
                continue

            if event.membership == Membership.JOIN:
                return True

        return False
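
    # Illustrative input for _is_host_in_room (assumed IDs): a StateMap of
    # (event type, state key) -> event ID, e.g.
    #
    #   {
    #       ("m.room.create", ""): "$create",
    #       ("m.room.member", "@alice:example.com"): "$alice_join",
    #   }
    #
    # A map holding only the create event means the room is mid-creation, so
    # the check above short-circuits to True.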


class RoomMemberMasterHandler(RoomMemberHandler):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self.distributor = hs.get_distributor()
        self.distributor.declare("user_left_room")

    async def _is_remote_room_too_complex(
        self, room_id: str, remote_room_hosts: List[str]
    ) -> Optional[bool]:
        """
        Check if the complexity of a remote room is too great.

        Args:
            room_id: The ID of the room to check.
            remote_room_hosts: The remote homeservers to ask for the room's
                complexity.

        Returns:
            Whether the complexity is too great, or None if it could not be
            fetched.
        """
        max_complexity = self.hs.config.server.limit_remote_rooms.complexity
        complexity = await self.federation_handler.get_room_complexity(
            remote_room_hosts, room_id
        )

        if complexity:
            return complexity["v1"] > max_complexity
        return None
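
    # A sketch of the data consumed above, assuming a well-formed response:
    # the federation complexity lookup yields a dict like {"v1": 9.5}, and
    # "v1" is the metric compared against the configured maximum.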

    async def _is_local_room_too_complex(self, room_id: str) -> bool:
        """
        Check if the complexity of a local room is too great.

        Args:
            room_id: The room ID to check for complexity.
        """
        max_complexity = self.hs.config.server.limit_remote_rooms.complexity
        complexity = await self.store.get_room_complexity(room_id)

        return complexity["v1"] > max_complexity

    async def _remote_join(
        self,
        requester: Requester,
        remote_room_hosts: List[str],
        room_id: str,
        user: UserID,
        content: dict,
    ) -> Tuple[str, int]:
        """Implements RoomMemberHandler._remote_join"""
        # filter ourselves out of remote_room_hosts: do_invite_join ignores it
        # and if it is the only entry we'd like to return a 404 rather than a
        # 500.
        remote_room_hosts = [
            host for host in remote_room_hosts if host != self.hs.hostname
        ]

        if len(remote_room_hosts) == 0:
            raise NoKnownServersError(
                "Can't join remote room because no servers "
                "that are in the room have been provided.",
            )

        check_complexity = self.hs.config.server.limit_remote_rooms.enabled
        if (
            check_complexity
            and self.hs.config.server.limit_remote_rooms.admins_can_join
        ):
            check_complexity = not await self.store.is_server_admin(user)

        if check_complexity:
            # Fetch the room complexity
            too_complex = await self._is_remote_room_too_complex(
                room_id, remote_room_hosts
            )
            if too_complex is True:
                raise SynapseError(
                    code=400,
                    msg=self.hs.config.server.limit_remote_rooms.complexity_error,
                    errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                )

        # We don't do an auth check if we are doing an invite
        # join dance for now, since we're kinda implicitly checking
        # that we are allowed to join when we decide whether or not we
        # need to do the invite/join dance.
        event_id, stream_id = await self.federation_handler.do_invite_join(
            remote_room_hosts, room_id, user.to_string(), content
        )

        # Check the room we just joined wasn't too large, if we didn't fetch the
        # complexity of it before.
        if check_complexity:
            if too_complex is False:
                # We checked, and we're under the limit.
                return event_id, stream_id

            # Check again, but with the local state events
            too_complex = await self._is_local_room_too_complex(room_id)

            if too_complex is False:
                # We're under the limit.
                return event_id, stream_id

            # The room is too large. Leave.
            requester = types.create_requester(
                user, authenticated_entity=self._server_name
            )
            await self.update_membership(
                requester=requester, target=user, room_id=room_id, action="leave"
            )
            raise SynapseError(
                code=400,
                msg=self.hs.config.server.limit_remote_rooms.complexity_error,
                errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
            )

        return event_id, stream_id
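
    # Illustrative homeserver config (assumed values) driving the complexity
    # checks in _remote_join above:
    #
    #   limit_remote_rooms:
    #     enabled: true
    #     complexity: 3.0
    #     complexity_error: "This room is too complex."
    #     admins_can_join: true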

    async def remote_reject_invite(
        self,
        invite_event_id: str,
        txn_id: Optional[str],
        requester: Requester,
        content: JsonDict,
    ) -> Tuple[str, int]:
        """
        Rejects an out-of-band invite received from a remote user.

        Implements RoomMemberHandler.remote_reject_invite
        """
        invite_event = await self.store.get_event(invite_event_id)
        room_id = invite_event.room_id
        target_user = invite_event.state_key

        # first of all, try doing a rejection via the inviting server
        fed_handler = self.federation_handler
        try:
            inviter_id = UserID.from_string(invite_event.sender)
            event, stream_id = await fed_handler.do_remotely_reject_invite(
                [inviter_id.domain], room_id, target_user, content=content
            )
            return event.event_id, stream_id
        except Exception as e:
            # if we were unable to reject the invite, we will generate our own
            # leave event.
            #
            # The 'except' clause is very broad, but we need to
            # capture everything from DNS failures upwards
            #
            logger.warning("Failed to reject invite: %s", e)

            return await self._generate_local_out_of_band_leave(
                invite_event, txn_id, requester, content
            )

    async def remote_rescind_knock(
        self,
        knock_event_id: str,
        txn_id: Optional[str],
        requester: Requester,
        content: JsonDict,
    ) -> Tuple[str, int]:
        """
        Rescinds a local knock made on a remote room.

        Args:
            knock_event_id: The ID of the knock event to rescind.
            txn_id: The transaction ID to use.
            requester: The originator of the request.
            content: The content of the leave event.

        Implements RoomMemberHandler.remote_rescind_knock
        """
        # TODO: We don't yet support rescinding knocks over federation
        # as we don't know which homeserver to send it to. An obvious
        # candidate is the remote homeserver we originally knocked through,
        # however we don't currently store that information.

        # Just rescind the knock locally
        knock_event = await self.store.get_event(knock_event_id)
        return await self._generate_local_out_of_band_leave(
            knock_event, txn_id, requester, content
        )

    async def _generate_local_out_of_band_leave(
        self,
        previous_membership_event: EventBase,
        txn_id: Optional[str],
        requester: Requester,
        content: JsonDict,
    ) -> Tuple[str, int]:
        """Generate a local leave event for a room.

        This can be called after we e.g. fail to reject an invite via a remote
        server. It generates an out-of-band membership event locally.

        Args:
            previous_membership_event: the previous membership event for this user
            txn_id: optional transaction ID supplied by the client
            requester: user making the request, according to the access token
            content: additional content to include in the leave event.
                Normally an empty dict.

        Returns:
            A tuple of (event ID, stream ID) for the leave event.
        """
        room_id = previous_membership_event.room_id
        target_user = previous_membership_event.state_key

        content["membership"] = Membership.LEAVE

        event_dict = {
            "type": EventTypes.Member,
            "room_id": room_id,
            "sender": target_user,
            "content": content,
            "state_key": target_user,
        }

        # the auth events for the new event are the same as that of the
        # previous event, plus the event itself.
        #
        # the prev_events consist solely of the previous membership event.
        prev_event_ids = [previous_membership_event.event_id]
        auth_event_ids = (
            list(previous_membership_event.auth_event_ids()) + prev_event_ids
        )

        # Try several times; this can fail with PartialStateConflictError
        # in handle_new_client_event, cf the comment in the except block.
        max_retries = 5
        for i in range(max_retries):
            try:
                (
                    event,
                    unpersisted_context,
                ) = await self.event_creation_handler.create_event(
                    requester,
                    event_dict,
                    txn_id=txn_id,
                    prev_event_ids=prev_event_ids,
                    auth_event_ids=auth_event_ids,
                    outlier=True,
                )
                context = await unpersisted_context.persist(event)
                event.internal_metadata.out_of_band_membership = True

                result_event = (
                    await self.event_creation_handler.handle_new_client_event(
                        requester,
                        events_and_context=[(event, context)],
                        extra_users=[UserID.from_string(target_user)],
                    )
                )

                break
            except PartialStateConflictError as e:
                # Persisting couldn't happen because the room got un-partial
                # stated in the meantime and the context needs to be
                # recomputed, so let's do so.
                if i == max_retries - 1:
                    raise e

        # we know it was persisted, so must have a stream ordering
        assert result_event.internal_metadata.stream_ordering

        return result_event.event_id, result_event.internal_metadata.stream_ordering
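
    # Illustrative auth/prev chain (assumed event IDs) for the out-of-band
    # leave generated above: if the rejected invite is "$invite" and its
    # auth_event_ids() are ["$create", "$power", "$join_rules"], the leave is
    # created with
    #
    #   prev_event_ids = ["$invite"]
    #   auth_event_ids = ["$create", "$power", "$join_rules", "$invite"]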

    async def remote_knock(
        self,
        requester: Requester,
        remote_room_hosts: List[str],
        room_id: str,
        user: UserID,
        content: dict,
    ) -> Tuple[str, int]:
        """Sends a knock to a room. Attempts to do so via one remote out of a given list.

        Args:
            requester: The user making the request, according to the access token.
            remote_room_hosts: A list of homeservers to try knocking through.
            room_id: The ID of the room to knock on.
            user: The user to knock on behalf of.
            content: The content of the knock event.

        Returns:
            A tuple of (event ID, stream ID).
        """
        # filter ourselves out of remote_room_hosts
        remote_room_hosts = [
            host for host in remote_room_hosts if host != self.hs.hostname
        ]

        if len(remote_room_hosts) == 0:
            raise NoKnownServersError()

        return await self.federation_handler.do_knock(
            remote_room_hosts, room_id, user.to_string(), content=content
        )

    async def _user_left_room(self, target: UserID, room_id: str) -> None:
        """Implements RoomMemberHandler._user_left_room"""
        user_left_room(self.distributor, target, room_id)


class RoomForgetterHandler(StateDeltasHandler):
    """Forgets rooms when they are left, when enabled in the homeserver config.

    For the purposes of this feature, kicks, bans and "leaves" via state resolution
    weirdness are all considered to be leaves.

    Derived from `StatsHandler` and `UserDirectoryHandler`.
    """

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self._hs = hs
        self._store = hs.get_datastores().main
        self._storage_controllers = hs.get_storage_controllers()
        self._clock = hs.get_clock()
        self._notifier = hs.get_notifier()
        self._room_member_handler = hs.get_room_member_handler()

        # The current position in the current_state_delta stream
        self.pos: Optional[int] = None

        # Guard to ensure we only process deltas one at a time
        self._is_processing = False

        if hs.config.worker.run_background_tasks:
            self._notifier.add_replication_callback(self.notify_new_event)

            # We kick this off to pick up outstanding work from before the
            # last restart.
            self._clock.call_later(0, self.notify_new_event)

    def notify_new_event(self) -> None:
        """Called when there may be more deltas to process"""
        if self._is_processing:
            return

        self._is_processing = True

        async def process() -> None:
            try:
                await self._unsafe_process()
            finally:
                self._is_processing = False

        run_as_background_process("room_forgetter.notify_new_event", process)
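
    # The _is_processing flag above collapses bursts of replication
    # notifications into a single background run: any deltas that arrive while
    # a run is in flight are picked up anyway, because _unsafe_process() loops
    # until the stored position catches up with the stream maximum.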

    async def _unsafe_process(self) -> None:
        # If self.pos is None then it means we haven't fetched it from the DB yet
        if self.pos is None:
            self.pos = await self._store.get_room_forgetter_stream_pos()
            room_max_stream_ordering = self._store.get_room_max_stream_ordering()
            if self.pos > room_max_stream_ordering:
                # apparently, we've processed more events than exist in the database!
                # this can happen if events are removed with history purge or similar.
                logger.warning(
                    "Event stream ordering appears to have gone backwards (%i -> %i): "
                    "rewinding room forgetter processor",
                    self.pos,
                    room_max_stream_ordering,
                )
                self.pos = room_max_stream_ordering

        if not self._hs.config.room.forget_on_leave:
            # Update the processing position, so that if the server admin turns
            # the feature on at a later date, we don't decide to forget every
            # room that has ever been left in the past.
            #
            # We wait for a short time so that we don't "tight" loop just
            # keeping the table up to date.
            await self._clock.sleep(0.5)

            self.pos = self._store.get_room_max_stream_ordering()
            await self._store.update_room_forgetter_stream_pos(self.pos)
            return

        # Loop round handling deltas until we're up to date
        while True:
            # Be sure to read the max stream_ordering *before* checking if
            # there are any outstanding deltas, since there is otherwise a
            # chance that we could miss updates which arrive after we check
            # the deltas.
            room_max_stream_ordering = self._store.get_room_max_stream_ordering()
            if self.pos == room_max_stream_ordering:
                break

            logger.debug(
                "Processing room forgetting %s->%s", self.pos, room_max_stream_ordering
            )
            (
                max_pos,
                deltas,
            ) = await self._storage_controllers.state.get_current_state_deltas(
                self.pos, room_max_stream_ordering
            )

            logger.debug("Handling %d state deltas", len(deltas))
            await self._handle_deltas(deltas)

            self.pos = max_pos

            # Expose current event processing position to prometheus
            event_processing_positions.labels("room_forgetter").set(max_pos)

            await self._store.update_room_forgetter_stream_pos(max_pos)

    async def _handle_deltas(self, deltas: List[StateDelta]) -> None:
        """Called with the state deltas to process"""
        for delta in deltas:
            if delta.event_type != EventTypes.Member:
                continue

            if not self._hs.is_mine_id(delta.state_key):
                continue

            change = await self._get_key_change(
                delta.prev_event_id,
                delta.event_id,
                key_name="membership",
                public_value=Membership.JOIN,
            )
            is_leave = change is MatchChange.now_false

            if is_leave:
                try:
                    await self._room_member_handler.forget(
                        UserID.from_string(delta.state_key), delta.room_id
                    )
                except SynapseError as e:
                    if e.code == 400:
                        # The user is back in the room.
                        pass
                    else:
                        raise
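
    # Reading the delta check above: _get_key_change compares the
    # "membership" content key of the old and new events against
    # Membership.JOIN, so MatchChange.now_false means the user went from
    # joined to not-joined (a leave, kick, or ban), which is what triggers
    # the forget.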


def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]:
    """
    Return the list of users which can issue invites.

    This is done by exploring the joined users and comparing their power levels
    to the necessary power level to issue an invite.

    Args:
        auth_events: state in force at this point in the room

    Returns:
        The users which can issue invites.
    """
    invite_level = get_named_level(auth_events, "invite", 0)
    users_default_level = get_named_level(auth_events, "users_default", 0)
    power_level_event = get_power_level_event(auth_events)

    # Custom power-levels for users.
    if power_level_event:
        users = power_level_event.content.get("users", {})
    else:
        users = {}

    result = []

    # Check which members are able to invite by ensuring they're joined and have
    # the necessary power level.
    for (event_type, state_key), event in auth_events.items():
        if event_type != EventTypes.Member:
            continue

        if event.membership != Membership.JOIN:
            continue

        # Check if the user has a custom power level.
        if users.get(state_key, users_default_level) >= invite_level:
            result.append(state_key)

    return result
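
# Illustrative power-levels content (assumed values) for the function above:
# with
#
#   {"invite": 50, "users_default": 0, "users": {"@mod:example.com": 50}}
#
# a joined @mod:example.com can issue invites (50 >= 50), while a joined user
# at the default level cannot (0 < 50).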


def get_servers_from_users(users: List[str]) -> Set[str]:
    """
    Resolve a list of users into their servers.

    Args:
        users: A list of users.

    Returns:
        A set of servers.
    """
    servers = set()
    for user in users:
        try:
            servers.add(get_domain_from_id(user))
        except SynapseError:
            pass
    return servers
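
# Illustrative use of get_servers_from_users (assumed IDs): malformed user IDs
# are skipped rather than raising, so
#
#   get_servers_from_users(["@a:example.com", "@b:matrix.org", "bogus"])
#
# yields {"example.com", "matrix.org"}.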