Merge remote-tracking branch 'upstream/release-v1.62'

Tulir Asokan 2022-06-28 19:31:54 +03:00
commit ba9c553c5e
224 changed files with 5898 additions and 8387 deletions


@@ -59,6 +59,7 @@ from synapse.federation.federation_client import InvalidResponseError
 from synapse.http.servlet import assert_params_in_dict
 from synapse.logging.context import nested_logging_context
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.module_api import NOT_SPAM
 from synapse.replication.http.federation import (
     ReplicationCleanRoomRestServlet,
     ReplicationStoreRoomOnOutlierMembershipRestServlet,
@@ -545,6 +546,7 @@ class FederationHandler:
             if ret.partial_state:
                 # TODO(faster_joins): roll this back if we don't manage to start the
                 #   background resync (eg process_remote_join fails)
+                #   https://github.com/matrix-org/synapse/issues/12998
                 await self.store.store_partial_state_room(room_id, ret.servers_in_room)

             max_stream_id = await self._federation_event_handler.process_remote_join(
@@ -799,9 +801,7 @@ class FederationHandler:
         # The remote hasn't signed it yet, obviously. We'll do the full checks
         # when we get the event back in `on_send_join_request`
-        await self._event_auth_handler.check_auth_rules_from_context(
-            room_version, event, context
-        )
+        await self._event_auth_handler.check_auth_rules_from_context(event, context)

         return event

     async def on_invite_request(
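Throughout this file the explicit room-version argument is dropped from `check_auth_rules_from_context`: the event already knows which room version it was built for, so passing the version alongside it was redundant. A minimal, self-contained sketch of that refactoring pattern follows; the class and function names below are illustrative stand-ins, not Synapse's actual implementation.

    # Illustrative only: "stop passing what the object already carries".
    # Names here are hypothetical, not Synapse's internals.
    from dataclasses import dataclass

    @dataclass
    class RoomVersion:
        identifier: str

    @dataclass
    class Event:
        event_id: str
        room_version: RoomVersion  # the event itself records its room version

    def check_auth_rules_from_event(event: Event) -> None:
        # Before the refactor a caller had to pass (room_version, event) and the two
        # could disagree; deriving the version from the event removes that risk.
        print(f"checking {event.event_id} under room version {event.room_version.identifier}")

    check_auth_rules_from_event(Event("$example", RoomVersion("9")))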
@@ -821,11 +821,14 @@ class FederationHandler:
         if self.hs.config.server.block_non_admin_invites:
             raise SynapseError(403, "This server does not accept room invites")

-        if not await self.spam_checker.user_may_invite(
+        spam_check = await self.spam_checker.user_may_invite(
             event.sender, event.state_key, event.room_id
-        ):
+        )
+        if spam_check != NOT_SPAM:
             raise SynapseError(
-                403, "This user is not permitted to send invites to this server/user"
+                403,
+                "This user is not permitted to send invites to this server/user",
+                spam_check,
             )

         membership = event.content.get("membership")
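This hunk replaces the boolean spam check with the newer calling convention: `user_may_invite` returns `synapse.module_api.NOT_SPAM` when the invite is acceptable, and the handler forwards any other verdict into the error it raises. A rough, self-contained sketch of that pattern, using stand-in types rather than the real module API:

    # Sketch of the NOT_SPAM convention; the error class and checker body are
    # invented for illustration and are not Synapse's real classes.
    import asyncio

    NOT_SPAM = "NOT_SPAM"  # stand-in for synapse.module_api.NOT_SPAM

    class InviteRejectedError(Exception):
        def __init__(self, status: int, msg: str, errcode: str) -> None:
            super().__init__(msg)
            self.status = status
            self.errcode = errcode

    async def user_may_invite(inviter: str, invitee: str, room_id: str) -> str:
        # A real checker consults configured modules; here we block a single user.
        if inviter == "@spammer:example.org":
            return "M_FORBIDDEN"  # any value other than NOT_SPAM means "rejected"
        return NOT_SPAM

    async def handle_invite(inviter: str, invitee: str, room_id: str) -> None:
        spam_check = await user_may_invite(inviter, invitee, room_id)
        if spam_check != NOT_SPAM:
            # Forward the checker's verdict so the client sees a specific error code.
            raise InviteRejectedError(403, "Invite rejected by spam checker", spam_check)

    asyncio.run(handle_invite("@alice:example.org", "@bob:example.org", "!room:example.org"))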
@@ -972,9 +975,7 @@ class FederationHandler:
         try:
             # The remote hasn't signed it yet, obviously. We'll do the full checks
             # when we get the event back in `on_send_leave_request`
-            await self._event_auth_handler.check_auth_rules_from_context(
-                room_version_obj, event, context
-            )
+            await self._event_auth_handler.check_auth_rules_from_context(event, context)
         except AuthError as e:
             logger.warning("Failed to create new leave %r because %s", event, e)
             raise e
@@ -1033,9 +1034,7 @@ class FederationHandler:
         try:
             # The remote hasn't signed it yet, obviously. We'll do the full checks
             # when we get the event back in `on_send_knock_request`
-            await self._event_auth_handler.check_auth_rules_from_context(
-                room_version_obj, event, context
-            )
+            await self._event_auth_handler.check_auth_rules_from_context(event, context)
         except AuthError as e:
             logger.warning("Failed to create new knock %r because %s", event, e)
             raise e
@@ -1206,9 +1205,9 @@ class FederationHandler:
             event.internal_metadata.send_on_behalf_of = self.hs.hostname

             try:
-                validate_event_for_room_version(room_version_obj, event)
+                validate_event_for_room_version(event)
                 await self._event_auth_handler.check_auth_rules_from_context(
-                    room_version_obj, event, context
+                    event, context
                 )
             except AuthError as e:
                 logger.warning("Denying new third party invite %r because %s", event, e)
@@ -1258,10 +1257,8 @@ class FederationHandler:
         )

         try:
-            validate_event_for_room_version(room_version_obj, event)
-            await self._event_auth_handler.check_auth_rules_from_context(
-                room_version_obj, event, context
-            )
+            validate_event_for_room_version(event)
+            await self._event_auth_handler.check_auth_rules_from_context(event, context)
         except AuthError as e:
             logger.warning("Denying third party invite %r because %s", event, e)
             raise e
@@ -1506,14 +1503,17 @@ class FederationHandler:
         # TODO(faster_joins): do we need to lock to avoid races? What happens if other
         #   worker processes kick off a resync in parallel? Perhaps we should just elect
         #   a single worker to do the resync.
+        #   https://github.com/matrix-org/synapse/issues/12994
         #
         # TODO(faster_joins): what happens if we leave the room during a resync? if we
         #   really leave, that might mean we have difficulty getting the room state over
         #   federation.
+        #   https://github.com/matrix-org/synapse/issues/12802
         #
         # TODO(faster_joins): we need some way of prioritising which homeservers in
         #   `other_destinations` to try first, otherwise we'll spend ages trying dead
         #   homeservers for large rooms.
+        #   https://github.com/matrix-org/synapse/issues/12999

         if initial_destination is None and len(other_destinations) == 0:
             raise ValueError(
@@ -1543,9 +1543,11 @@ class FederationHandler:
                 # all the events are updated, so we can update current state and
                 # clear the lazy-loading flag.
                 logger.info("Updating current state for %s", room_id)
+                # TODO(faster_joins): support workers
+                #   https://github.com/matrix-org/synapse/issues/12994
                 assert (
                     self._storage_controllers.persistence is not None
-                ), "TODO(faster_joins): support for workers"
+                ), "worker-mode deployments not currently supported here"
                 await self._storage_controllers.persistence.update_current_state(
                     room_id
                 )
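The assertion in this hunk narrows an Optional attribute before it is used (the persistence controller is only available on processes that persist events), and its message is reworded now that the worker-support TODO lives in a comment with an issue link. A generic sketch of that `assert ... is not None` narrowing pattern, with hypothetical names:

    # Generic illustration of using an assert to narrow Optional before use;
    # the classes below are hypothetical, not Synapse's storage controllers.
    import asyncio
    from typing import Optional

    class PersistenceController:
        async def update_current_state(self, room_id: str) -> None:
            print(f"recomputing current state for {room_id}")

    class StorageControllers:
        def __init__(self, persistence: Optional[PersistenceController]) -> None:
            # Only processes that persist events get a persistence controller.
            self.persistence = persistence

    async def finish_resync(storage: StorageControllers, room_id: str) -> None:
        assert (
            storage.persistence is not None
        ), "worker-mode deployments not currently supported here"
        # After the assert, type checkers treat storage.persistence as non-Optional.
        await storage.persistence.update_current_state(room_id)

    asyncio.run(finish_resync(StorageControllers(PersistenceController()), "!room:example.org"))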
@@ -1559,6 +1561,8 @@ class FederationHandler:
                     )

                     # TODO(faster_joins) update room stats and user directory?
+                    # https://github.com/matrix-org/synapse/issues/12814
+                    # https://github.com/matrix-org/synapse/issues/12815
                     return

                 # we raced against more events arriving with partial state. Go round
@@ -1566,6 +1570,8 @@ class FederationHandler:
                 # TODO(faster_joins): there is still a race here, whereby incoming events which raced
                 # with us will fail to be persisted after the call to `clear_partial_state_room` due to
                 # having partial state.
+                # https://github.com/matrix-org/synapse/issues/12988
+                #
                 continue

             events = await self.store.get_events_as_list(
@@ -1588,6 +1594,7 @@ class FederationHandler:
                             # indefinitely is also not the right thing to do if we can
                             # reach all homeservers and they all claim they don't have
                             # the state we want.
+                            # https://github.com/matrix-org/synapse/issues/13000
                             logger.error(
                                 "Failed to get state for %s at %s from %s because %s, "
                                 "giving up!",