#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2019 The Matrix.org Foundation C.I.C.
# Copyright 2014-2016 OpenMarket Ltd
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#

import itertools
import logging
from collections import deque
from typing import (
    TYPE_CHECKING,
    AbstractSet,
    Any,
    Awaitable,
    Callable,
    ClassVar,
    Collection,
    Deque,
    Dict,
    Generator,
    Generic,
    Iterable,
    List,
    Optional,
    Set,
    Tuple,
    TypeVar,
    Union,
)

import attr
from prometheus_client import Counter, Histogram

from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.logging.opentracing import (
    SynapseTags,
    active_span,
    set_tag,
    start_active_span_follows_from,
    trace,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.controllers.state import StateStorageController
from synapse.storage.databases import Databases
from synapse.storage.databases.main.events import DeltaState
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
    PersistedEventPosition,
    RoomStreamToken,
    StateMap,
    get_domain_from_id,
)
from synapse.types.state import StateFilter
from synapse.util.async_helpers import ObservableDeferred, yieldable_gather_results
from synapse.util.metrics import Measure

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

# The number of times we are recalculating the current state
state_delta_counter = Counter("synapse_storage_events_state_delta", "")

# The number of times we are recalculating state when there is only a
# single forward extremity
state_delta_single_event_counter = Counter(
    "synapse_storage_events_state_delta_single_event", ""
)

# The number of times we are recalculating state when we could have reasonably
# calculated the delta when we calculated the state for an event we were
# persisting.
state_delta_reuse_delta_counter = Counter(
    "synapse_storage_events_state_delta_reuse_delta", ""
)

# The number of forward extremities for each new event.
forward_extremities_counter = Histogram(
    "synapse_storage_events_forward_extremities_persisted",
    "Number of forward extremities for each new event",
    buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)

# The number of stale forward extremities for each new event. Stale extremities
# are those that were in the previous set of extremities as well as the new.
stale_forward_extremities_counter = Histogram(
    "synapse_storage_events_stale_forward_extremities_persisted",
    "Number of unchanged forward extremities for each new event",
    buckets=(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)

state_resolutions_during_persistence = Counter(
    "synapse_storage_events_state_resolutions_during_persistence",
    "Number of times we had to do state res to calculate new current state",
)

potential_times_prune_extremities = Counter(
    "synapse_storage_events_potential_times_prune_extremities",
    "Number of times we might be able to prune extremities",
)

times_pruned_extremities = Counter(
    "synapse_storage_events_times_pruned_extremities",
    "Number of times we were actually able to prune extremities",
)


@attr.s(auto_attribs=True, slots=True)
class _PersistEventsTask:
    """A batch of events to persist."""

    name: ClassVar[str] = "persist_event_batch"  # used for opentracing

    events_and_contexts: List[Tuple[EventBase, EventContext]]
    backfilled: bool

    def try_merge(self, task: "_EventPersistQueueTask") -> bool:
        """Batches events with the same backfilled option together."""
        if (
            not isinstance(task, _PersistEventsTask)
            or self.backfilled != task.backfilled
        ):
            return False

        self.events_and_contexts.extend(task.events_and_contexts)
        return True


@attr.s(auto_attribs=True, slots=True)
class _UpdateCurrentStateTask:
    """A room whose current state needs recalculating."""

    name: ClassVar[str] = "update_current_state"  # used for opentracing

    def try_merge(self, task: "_EventPersistQueueTask") -> bool:
        """Deduplicates consecutive recalculations of current state."""
        return isinstance(task, _UpdateCurrentStateTask)


_EventPersistQueueTask = Union[_PersistEventsTask, _UpdateCurrentStateTask]
_PersistResult = TypeVar("_PersistResult")

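# Illustrative sketch (not part of the module's runtime path) of the try_merge
# contract above: two _PersistEventsTask batches with the same `backfilled`
# flag coalesce into one, while tasks of a different kind or with a different
# flag stay separate. `ev1`/`ctx1` etc. are hypothetical stand-ins for real
# (EventBase, EventContext) pairs.
#
#     batch = _PersistEventsTask(events_and_contexts=[(ev1, ctx1)], backfilled=False)
#     nxt = _PersistEventsTask(events_and_contexts=[(ev2, ctx2)], backfilled=False)
#     assert batch.try_merge(nxt)  # batch now holds both events
#     assert not batch.try_merge(
#         _PersistEventsTask(events_and_contexts=[(ev3, ctx3)], backfilled=True)
#     )  # differing backfilled flags never merge
#     assert not batch.try_merge(_UpdateCurrentStateTask())  # different task kind
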

@attr.s(auto_attribs=True, slots=True)
class _EventPersistQueueItem(Generic[_PersistResult]):
    task: _EventPersistQueueTask
    deferred: ObservableDeferred[_PersistResult]

    parent_opentracing_span_contexts: List = attr.ib(factory=list)
    """A list of opentracing spans waiting for this batch"""

    opentracing_span_context: Any = None
    """The opentracing span under which the persistence actually happened"""


class _EventPeristenceQueue(Generic[_PersistResult]):
    """Queues up tasks so that they can be processed with only one concurrent
    transaction per room.

    Tasks can be bulk persistence of events or recalculation of a room's current state.
    """

    def __init__(
        self,
        per_item_callback: Callable[
            [str, _EventPersistQueueTask],
            Awaitable[_PersistResult],
        ],
    ):
        """Create a new event persistence queue

        The per_item_callback will be called for each item added via add_to_queue,
        and its result will be returned via the Deferreds returned from add_to_queue.
        """
        self._event_persist_queues: Dict[str, Deque[_EventPersistQueueItem]] = {}
        self._currently_persisting_rooms: Set[str] = set()
        self._per_item_callback = per_item_callback

    async def add_to_queue(
        self,
        room_id: str,
        task: _EventPersistQueueTask,
    ) -> _PersistResult:
        """Add a task to the queue.

        If we are not already processing tasks in this room, starts off a background
        process to do so, calling the per_item_callback for each item.

        Args:
            room_id: the room to which the task applies
            task: A _PersistEventsTask or _UpdateCurrentStateTask to process.

        Returns:
            the result returned by the `_per_item_callback` passed to
            `__init__`.
        """
        queue = self._event_persist_queues.setdefault(room_id, deque())

        if queue and queue[-1].task.try_merge(task):
            # the new task has been merged into the last task in the queue
            end_item = queue[-1]
        else:
            deferred: ObservableDeferred[_PersistResult] = ObservableDeferred(
                defer.Deferred(), consumeErrors=True
            )

            end_item = _EventPersistQueueItem(
                task=task,
                deferred=deferred,
            )
            queue.append(end_item)

        # also add our active opentracing span to the item so that we get a link back
        span = active_span()
        if span:
            end_item.parent_opentracing_span_contexts.append(span.context)

        # start a processor for the queue, if there isn't one already
        self._handle_queue(room_id)

        # wait for the queue item to complete
        res = await make_deferred_yieldable(end_item.deferred.observe())

        # add another opentracing span which links to the persist trace.
        with start_active_span_follows_from(
            f"{task.name}_complete", (end_item.opentracing_span_context,)
        ):
            pass

        return res
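
    # Usage sketch for the queue (hypothetical callback, task and room ID,
    # shown for illustration only). Because _handle_queue runs at most one
    # processor per room, the callback is never invoked concurrently for the
    # same room:
    #
    #     async def callback(room_id: str, task: _EventPersistQueueTask) -> Dict[str, str]:
    #         ...  # e.g. write the batch to the database
    #
    #     queue: _EventPeristenceQueue[Dict[str, str]] = _EventPeristenceQueue(callback)
    #     result = await queue.add_to_queue("!room:example.org", some_task)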

    def _handle_queue(self, room_id: str) -> None:
        """Attempts to handle the queue for a room if not already being handled.

        The queue's callback will be invoked for each item in the queue,
        of type _EventPersistQueueItem. The per_item_callback will continuously
        be called with new items, unless the queue becomes empty. The return
        value of the function will be given to the deferreds waiting on the item,
        exceptions will be passed to the deferreds as well.

        This function should therefore be called whenever anything is added
        to the queue.

        If another callback is currently handling the queue then it will not be
        invoked.
        """
        if room_id in self._currently_persisting_rooms:
            return

        self._currently_persisting_rooms.add(room_id)

        async def handle_queue_loop() -> None:
            try:
                queue = self._get_drainining_queue(room_id)
                for item in queue:
                    try:
                        with start_active_span_follows_from(
                            item.task.name,
                            item.parent_opentracing_span_contexts,
                            inherit_force_tracing=True,
                        ) as scope:
                            if scope:
                                item.opentracing_span_context = scope.span.context

                            ret = await self._per_item_callback(room_id, item.task)
                    except Exception:
                        with PreserveLoggingContext():
                            item.deferred.errback()
                    else:
                        with PreserveLoggingContext():
                            item.deferred.callback(ret)
            finally:
                # Items may have been added to the queue after we finished
                # draining; if so, put them back so that the next call to
                # _handle_queue starts a new processor for them.
                remaining_queue = self._event_persist_queues.pop(room_id, None)
                if remaining_queue:
                    self._event_persist_queues[room_id] = remaining_queue
                self._currently_persisting_rooms.discard(room_id)

        # set handle_queue_loop off in the background
        run_as_background_process("persist_events", handle_queue_loop)

    def _get_drainining_queue(
        self, room_id: str
    ) -> Generator[_EventPersistQueueItem, None, None]:
        queue = self._event_persist_queues.setdefault(room_id, deque())

        try:
            while True:
                yield queue.popleft()
        except IndexError:
            # Queue has been drained.
            pass
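
# The generator above is a common deque-draining idiom: popleft() until
# IndexError. Crucially, items appended to the deque *while* the generator is
# being consumed are still yielded, which is what lets handle_queue_loop pick
# up work queued mid-drain. A standalone equivalent, for illustration:
#
#     def drain(q: Deque[int]) -> Generator[int, None, None]:
#         try:
#             while True:
#                 yield q.popleft()
#         except IndexError:
#             pass  # queue empty: stop iterating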


class EventsPersistenceStorageController:
    """High level interface for handling persisting newly received events.

    Takes care of batching up events by room, and calculating the necessary
    current state and forward extremity changes.
    """

    def __init__(
        self,
        hs: "HomeServer",
        stores: Databases,
        state_controller: StateStorageController,
    ):
        # We ultimately want to split out the state store from the main store,
        # so we use separate variables here even though they point to the same
        # store for now.
        self.main_store = stores.main
        self.state_store = stores.state

        assert stores.persist_events
        self.persist_events_store = stores.persist_events

        self._clock = hs.get_clock()
        self._instance_name = hs.get_instance_name()
        self.is_mine_id = hs.is_mine_id
        self._event_persist_queue = _EventPeristenceQueue(
            self._process_event_persist_queue_task
        )
        self._state_resolution_handler = hs.get_state_resolution_handler()
        self._state_controller = state_controller
        self.hs = hs

    async def _process_event_persist_queue_task(
        self,
        room_id: str,
        task: _EventPersistQueueTask,
    ) -> Dict[str, str]:
        """Callback for the _event_persist_queue

        Returns:
            A dictionary of event ID to event ID we didn't persist as we already
            had another event persisted with the same TXN ID.
        """

        # Ensure that the room can't be deleted while we're persisting events to
        # it. We might already have taken out the lock, but since this is just a
        # "read" lock it's inherently reentrant.
        async with self.hs.get_worker_locks_handler().acquire_read_write_lock(
            NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
        ):
            if isinstance(task, _PersistEventsTask):
                return await self._persist_event_batch(room_id, task)
            elif isinstance(task, _UpdateCurrentStateTask):
                await self._update_current_state(room_id, task)
                return {}
            else:
                raise AssertionError(
                    f"Found an unexpected task type in event persistence queue: {task}"
                )
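
    # Note on the lock above: persistence takes NEW_EVENT_DURING_PURGE_LOCK_NAME
    # in read mode, so persists in different rooms can proceed concurrently,
    # while a purge of the room takes the same lock in write mode and therefore
    # excludes all of them until it completes.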

    @trace
    async def persist_events(
        self,
        events_and_contexts: Iterable[Tuple[EventBase, EventContext]],
        backfilled: bool = False,
    ) -> Tuple[List[EventBase], RoomStreamToken]:
        """
        Write events to the database.

        Args:
            events_and_contexts: list of tuples of (event, context)
            backfilled: Whether the results are retrieved from federation
                via backfill or not. Used to determine if they're "new" events
                which might update the current state etc.

        Returns:
            List of events persisted, and the current room stream position.
            The list of events persisted may not be the same as those passed in
            if they were deduplicated due to an event already existing that
            matched the transaction ID; the existing event is returned in such
            a case.

        Raises:
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        event_ids: List[str] = []
        partitioned: Dict[str, List[Tuple[EventBase, EventContext]]] = {}
        for event, ctx in events_and_contexts:
            partitioned.setdefault(event.room_id, []).append((event, ctx))
            event_ids.append(event.event_id)

        set_tag(
            SynapseTags.FUNC_ARG_PREFIX + "event_ids",
            str(event_ids),
        )
        set_tag(
            SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
            str(len(event_ids)),
        )
        set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled))

        async def enqueue(
            item: Tuple[str, List[Tuple[EventBase, EventContext]]],
        ) -> Dict[str, str]:
            room_id, evs_ctxs = item
            return await self._event_persist_queue.add_to_queue(
                room_id,
                _PersistEventsTask(events_and_contexts=evs_ctxs, backfilled=backfilled),
            )

        ret_vals = await yieldable_gather_results(enqueue, partitioned.items())

        # Each call to add_to_queue returns a map from event ID to existing event ID if
        # the event was deduplicated. (The dict may also include other entries if
        # the event was persisted in a batch with other events).
        #
        # Since we use `yieldable_gather_results` we need to merge the returned list
        # of dicts into one.
        replaced_events: Dict[str, str] = {}
        for d in ret_vals:
            replaced_events.update(d)

        persisted_events = []
        for event, _ in events_and_contexts:
            existing_event_id = replaced_events.get(event.event_id)
            if existing_event_id:
                persisted_events.append(
                    await self.main_store.get_event(existing_event_id)
                )
            else:
                persisted_events.append(event)

        return (
            persisted_events,
            self.main_store.get_room_max_token(),
        )
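
    # Worked example of the deduplication map (hypothetical event IDs): if a
    # client retries a send that reuses a transaction ID, add_to_queue may
    # return {"$retry_event": "$original_event"}; the loop above then swaps in
    # the already-persisted "$original_event" so callers always receive the
    # canonical event.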

    @trace
    async def persist_event(
        self, event: EventBase, context: EventContext, backfilled: bool = False
    ) -> Tuple[EventBase, PersistedEventPosition, RoomStreamToken]:
        """
        Returns:
            The event, stream ordering of `event`, and the stream ordering of the
            latest persisted event. The returned event may not match the given
            event if it was deduplicated due to an existing event matching the
            transaction ID.

        Raises:
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        # add_to_queue returns a map from event ID to existing event ID if the
        # event was deduplicated. (The dict may also include other entries if
        # the event was persisted in a batch with other events.)
        replaced_events = await self._event_persist_queue.add_to_queue(
            event.room_id,
            _PersistEventsTask(
                events_and_contexts=[(event, context)], backfilled=backfilled
            ),
        )
        replaced_event = replaced_events.get(event.event_id)
        if replaced_event:
            event = await self.main_store.get_event(replaced_event)

        event_stream_id = event.internal_metadata.stream_ordering
        # stream ordering should have been assigned by now
        assert event_stream_id

        pos = PersistedEventPosition(self._instance_name, event_stream_id)
        return event, pos, self.main_store.get_room_max_token()

    async def update_current_state(self, room_id: str) -> None:
        """Recalculate the current state for a room, and persist it"""
        await self._event_persist_queue.add_to_queue(
            room_id,
            _UpdateCurrentStateTask(),
        )

    async def _update_current_state(
        self, room_id: str, _task: _UpdateCurrentStateTask
    ) -> None:
        """Callback for the _event_persist_queue

        Recalculates the current state for a room, and persists it.
        """
        state = await self._calculate_current_state(room_id)
        delta = await self._calculate_state_delta(room_id, state)
        sliding_sync_table_changes = (
            await self.persist_events_store._calculate_sliding_sync_table_changes(
                room_id, [], delta
            )
        )

        await self.persist_events_store.update_current_state(
            room_id, delta, sliding_sync_table_changes
        )
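
    # `delta` here is a DeltaState (imported above): a description of which
    # current-state rows changed rather than a full snapshot, so the
    # current-state update stays proportional to the size of the change.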

    async def _calculate_current_state(self, room_id: str) -> StateMap[str]:
        """Calculate the current state of a room, based on the forward extremities

        Args:
            room_id: room for which to calculate current state

        Returns:
            map from (type, state_key) to event id for the current state in the room
        """
        latest_event_ids = await self.main_store.get_latest_event_ids_in_room(room_id)
        state_groups = set(
            (
                await self.main_store._get_state_group_for_events(latest_event_ids)
            ).values()
        )

        state_maps_by_state_group = await self.state_store._get_state_for_groups(
            state_groups
        )

        if len(state_groups) == 1:
            # If there is only one state group, then we know what the current
            # state is.
            return state_maps_by_state_group[state_groups.pop()]

        # Ok, we need to defer to the state handler to resolve our state sets.
        logger.debug("calling resolve_state_groups from preserve_events")

        # Avoid a circular import.
        from synapse.state import StateResolutionStore

        room_version = await self.main_store.get_room_version_id(room_id)
        res = await self._state_resolution_handler.resolve_state_groups(
            room_id,
            room_version,
            state_maps_by_state_group,
            event_map=None,
            state_res_store=StateResolutionStore(self.main_store),
        )

        return await res.get_state(self._state_controller, StateFilter.all())
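
    # Illustration of the fast path above (hypothetical values): a single
    # forward extremity means a single state group, whose state map *is* the
    # current state, so no state resolution is needed:
    #
    #     state_groups = {42}
    #     state_maps_by_state_group = {42: {("m.room.name", ""): "$name_event"}}
    #     # -> {("m.room.name", ""): "$name_event"} is returned directly
    #
    # Only when the extremities map to more than one state group do we fall
    # through to resolve_state_groups.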
async def _persist_event_batch(
|
2023-11-03 07:30:31 -04:00
|
|
|
self, room_id: str, task: _PersistEventsTask
|
2020-10-13 07:07:56 -04:00
|
|
|
) -> Dict[str, str]:
|
2021-06-14 06:59:27 -04:00
|
|
|
"""Callback for the _event_persist_queue
|
|
|
|
|
|
|
|
Calculates the change to current state and forward extremities, and
|
2019-10-30 09:33:38 -04:00
|
|
|
persists the given events and with those updates.
|
2020-10-13 07:07:56 -04:00
|
|
|
|
2023-11-03 07:30:31 -04:00
|
|
|
Assumes that we are only persisting events for one room at a time.
|
|
|
|
|
2020-10-13 07:07:56 -04:00
|
|
|
Returns:
|
|
|
|
A dictionary of event ID to event ID we didn't persist as we already
|
|
|
|
had another event persisted with the same TXN ID.
|
Handle race between persisting an event and un-partial stating a room (#13100)
Whenever we want to persist an event, we first compute an event context,
which includes the state at the event and a flag indicating whether the
state is partial. After a lot of processing, we finally try to store the
event in the database, which can fail for partial state events when the
containing room has been un-partial stated in the meantime.
We detect the race as a foreign key constraint failure in the data store
layer and turn it into a special `PartialStateConflictError` exception,
which makes its way up to the method in which we computed the event
context.
To make things difficult, the exception needs to cross a replication
request: `/fed_send_events` for events coming over federation and
`/send_event` for events from clients. We transport the
`PartialStateConflictError` as a `409 Conflict` over replication and
turn `409`s back into `PartialStateConflictError`s on the worker making
the request.
All client events go through
`EventCreationHandler.handle_new_client_event`, which is called in
*a lot* of places. Instead of trying to update all the code which
creates client events, we turn the `PartialStateConflictError` into a
`429 Too Many Requests` in
`EventCreationHandler.handle_new_client_event` and hope that clients
take it as a hint to retry their request.
On the federation event side, there are 7 places which compute event
contexts. 4 of them use outlier event contexts:
`FederationEventHandler._auth_and_persist_outliers_inner`,
`FederationHandler.do_knock`, `FederationHandler.on_invite_request` and
`FederationHandler.do_remotely_reject_invite`. These events won't have
the partial state flag, so we do not need to do anything for them.
The remaining 3 paths which create events are
`FederationEventHandler.process_remote_join`,
`FederationEventHandler.on_send_membership_event` and
`FederationEventHandler._process_received_pdu`.
We can't experience the race in `process_remote_join`, unless we're
handling an additional join into a partial state room, which currently
blocks, so we make no attempt to handle it correctly.
`on_send_membership_event` is only called by
`FederationServer._on_send_membership_event`, so we catch the
`PartialStateConflictError` there and retry just once.
`_process_received_pdu` is called by `on_receive_pdu` for incoming
events and `_process_pulled_event` for backfill. The latter should never
try to persist partial state events, so we ignore it. We catch the
`PartialStateConflictError` in `on_receive_pdu` and retry just once.
Referring to the graph of code paths in
https://github.com/matrix-org/synapse/issues/12988#issuecomment-1156857648
may make the above easier to follow.
Signed-off-by: Sean Quah <seanq@matrix.org>
2022-07-05 11:12:52 -04:00
|
|
|
|
|
|
|
Raises:
|
|
|
|
PartialStateConflictError: if attempting to persist a partial state event in
|
|
|
|
a room that has been un-partial stated.
|
2019-10-23 07:00:21 -04:00
|
|
|
"""
|
2022-07-07 08:19:31 -04:00
|
|
|
events_and_contexts = task.events_and_contexts
|
|
|
|
backfilled = task.backfilled
|
|
|
|
|
2021-07-15 12:46:54 -04:00
|
|
|
replaced_events: Dict[str, str] = {}
|
2019-10-23 07:00:21 -04:00
|
|
|
if not events_and_contexts:
|
2020-10-13 07:07:56 -04:00
|
|
|
return replaced_events
|
|
|
|
|
|
|
|
# Check if any of the events have a transaction ID that has already been
|
|
|
|
# persisted, and if so we don't persist it again.
|
|
|
|
#
|
|
|
|
# We should have checked this a long time before we get here, but it's
|
|
|
|
# possible that different send event requests race in such a way that
|
|
|
|
# they both pass the earlier checks. Checking here isn't racy, as we can
|
|
|
|
# have only one `_persist_events` per room being called at a time.
|
|
|
|
replaced_events = await self.main_store.get_already_persisted_events(
|
|
|
|
(event for event, _ in events_and_contexts)
|
|
|
|
)
|
|
|
|
|
|
|
|
if replaced_events:
|
|
|
|
events_and_contexts = [
|
|
|
|
(e, ctx)
|
|
|
|
for e, ctx in events_and_contexts
|
|
|
|
if e.event_id not in replaced_events
|
|
|
|
]
|
|
|
|
|
|
|
|
if not events_and_contexts:
|
|
|
|
return replaced_events
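# Illustrative sketch (not part of this module): the transaction-ID
# de-duplication above, with plain data. `already_persisted` plays the role
# of get_already_persisted_events, mapping each duplicate event ID to the
# event ID previously persisted with the same transaction ID.
from typing import Dict, List

def drop_already_persisted(
    event_ids: List[str], already_persisted: Dict[str, str]
) -> List[str]:
    # Keep only events with no previously-persisted duplicate.
    return [e for e in event_ids if e not in already_persisted]

# e.g. drop_already_persisted(["$a", "$b"], {"$b": "$b0"}) == ["$a"]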
|
2019-10-23 07:00:21 -04:00
|
|
|
|
|
|
|
chunks = [
|
|
|
|
events_and_contexts[x : x + 100]
|
|
|
|
for x in range(0, len(events_and_contexts), 100)
|
|
|
|
]
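# Illustrative sketch (not part of this module): the batching above as a
# stand-alone helper. Persisting in chunks of 100 bounds transaction size;
# the chunks are then processed sequentially, since different chunks may
# contain the same event.
from typing import List, Sequence, TypeVar

T = TypeVar("T")

def chunk(items: Sequence[T], size: int = 100) -> List[Sequence[T]]:
    return [items[x : x + size] for x in range(0, len(items), size)]

# chunk(list(range(250))) yields batches of 100, 100 and 50 items.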
|
|
|
|
|
|
|
|
for chunk in chunks:
|
|
|
|
# We can't easily parallelize these since different chunks
|
|
|
|
# might contain the same event. :(
|
|
|
|
|
2023-11-03 07:30:31 -04:00
|
|
|
new_forward_extremities = None
|
|
|
|
state_delta_for_room = None
|
2019-10-23 07:00:21 -04:00
|
|
|
|
|
|
|
if not backfilled:
|
|
|
|
with Measure(self._clock, "_calculate_state_and_extrem"):
|
2023-11-03 07:30:31 -04:00
|
|
|
# Work out the new "current state" for the room.
|
2019-10-23 07:00:21 -04:00
|
|
|
# We do this by working out what the new extremities are and then
|
|
|
|
# calculating the state from that.
|
2023-11-03 07:30:31 -04:00
|
|
|
(
|
|
|
|
new_forward_extremities,
|
|
|
|
state_delta_for_room,
|
|
|
|
) = await self._calculate_new_forward_extremities_and_state_delta(
|
|
|
|
room_id, chunk
|
|
|
|
)
|
2019-10-23 07:00:21 -04:00
|
|
|
|
2024-06-24 10:40:28 -04:00
|
|
|
with Measure(self._clock, "calculate_chain_cover_index_for_events"):
|
|
|
|
# We now calculate chain ID/sequence numbers for any state events we're
|
|
|
|
# persisting. We ignore out of band memberships as we're not in the room
|
|
|
|
# and won't have their auth chain (we'll fix it up later if we join the
|
|
|
|
# room).
|
|
|
|
#
|
|
|
|
# See: docs/auth_chain_difference_algorithm.md
|
|
|
|
new_event_links = await self.persist_events_store.calculate_chain_cover_index_for_events(
|
|
|
|
room_id, [e for e, _ in chunk]
|
|
|
|
)
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
await self.persist_events_store._persist_events_and_state_updates(
|
2023-11-03 07:30:31 -04:00
|
|
|
room_id,
|
2019-10-23 07:00:21 -04:00
|
|
|
chunk,
|
|
|
|
state_delta_for_room=state_delta_for_room,
|
2022-02-25 05:19:49 -05:00
|
|
|
new_forward_extremities=new_forward_extremities,
|
2021-11-29 17:01:54 -05:00
|
|
|
use_negative_stream_ordering=backfilled,
|
|
|
|
inhibit_local_membership_updates=backfilled,
|
2024-06-24 10:40:28 -04:00
|
|
|
new_event_links=new_event_links,
|
2019-10-23 07:00:21 -04:00
|
|
|
)
|
|
|
|
|
2020-10-13 07:07:56 -04:00
|
|
|
return replaced_events
|
|
|
|
|
2023-11-03 07:30:31 -04:00
|
|
|
async def _calculate_new_forward_extremities_and_state_delta(
|
|
|
|
self, room_id: str, ev_ctx_rm: List[Tuple[EventBase, EventContext]]
|
|
|
|
) -> Tuple[Optional[Set[str]], Optional[DeltaState]]:
|
|
|
|
"""Calculates the new forward extremities and state delta for a room
|
|
|
|
given events to persist.
|
|
|
|
|
|
|
|
Assumes that we are only persisting events for one room at a time.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
A tuple of:
|
|
|
|
A set of str giving the new forward extremities of the room.
|
|
|
|
|
|
|
|
The state delta for the room.
|
|
|
|
"""
|
|
|
|
|
|
|
|
latest_event_ids = await self.main_store.get_latest_event_ids_in_room(room_id)
|
|
|
|
new_latest_event_ids = await self._calculate_new_extremities(
|
|
|
|
room_id, ev_ctx_rm, latest_event_ids
|
|
|
|
)
|
|
|
|
|
|
|
|
if new_latest_event_ids == latest_event_ids:
|
|
|
|
# No change in extremities, so no change in state
|
|
|
|
return (None, None)
|
|
|
|
|
|
|
|
# there should always be at least one forward extremity.
|
|
|
|
# (except during the initial persistence of the send_join
|
|
|
|
# results, in which case there will be no existing
|
|
|
|
# extremities, so we'll `return` above and skip this bit.)
|
|
|
|
assert new_latest_event_ids, "No forward extremities left!"
|
|
|
|
|
|
|
|
new_forward_extremities = new_latest_event_ids
|
|
|
|
|
|
|
|
len_1 = len(latest_event_ids) == 1 and len(new_latest_event_ids) == 1
|
|
|
|
if len_1:
|
|
|
|
all_single_prev_not_state = all(
|
|
|
|
len(event.prev_event_ids()) == 1 and not event.is_state()
|
|
|
|
for event, ctx in ev_ctx_rm
|
|
|
|
)
|
|
|
|
# Don't bother calculating state if they're just
|
|
|
|
# a long chain of single ancestor non-state events.
|
|
|
|
if all_single_prev_not_state:
|
|
|
|
return (new_forward_extremities, None)
|
|
|
|
|
|
|
|
state_delta_counter.inc()
|
|
|
|
if len(new_latest_event_ids) == 1:
|
|
|
|
state_delta_single_event_counter.inc()
|
|
|
|
|
|
|
|
# This is a fairly handwavy check to see if we could
|
|
|
|
# have guessed what the delta would have been when
|
|
|
|
# processing one of these events.
|
|
|
|
# What we're interested in is if the latest extremities
|
|
|
|
# were the same when we created the event as they are
|
|
|
|
# now. When this server creates a new event (as opposed
|
|
|
|
# to receiving it over federation) it will use the
|
|
|
|
# forward extremities as the prev_events, so we can
|
|
|
|
# guess this by looking at the prev_events and checking
|
|
|
|
# if they match the current forward extremities.
|
|
|
|
for ev, _ in ev_ctx_rm:
|
|
|
|
prev_event_ids = set(ev.prev_event_ids())
|
|
|
|
if latest_event_ids == prev_event_ids:
|
|
|
|
state_delta_reuse_delta_counter.inc()
|
|
|
|
break
|
|
|
|
|
|
|
|
logger.debug("Calculating state delta for room %s", room_id)
|
|
|
|
with Measure(self._clock, "persist_events.get_new_state_after_events"):
|
|
|
|
res = await self._get_new_state_after_events(
|
|
|
|
room_id,
|
|
|
|
ev_ctx_rm,
|
|
|
|
latest_event_ids,
|
|
|
|
new_latest_event_ids,
|
|
|
|
)
|
|
|
|
current_state, delta_ids, new_latest_event_ids = res
|
|
|
|
|
|
|
|
# there should always be at least one forward extremity.
|
|
|
|
# (except during the initial persistence of the send_join
|
|
|
|
# results, in which case there will be no existing
|
|
|
|
# extremities, so we'll `return` above and skip this bit.)
|
|
|
|
assert new_latest_event_ids, "No forward extremities left!"
|
|
|
|
|
|
|
|
new_forward_extremities = new_latest_event_ids
|
|
|
|
|
|
|
|
# If either are not None then there has been a change,
|
|
|
|
# and we need to work out the delta (or use that
|
|
|
|
# given)
|
|
|
|
delta = None
|
|
|
|
if delta_ids is not None:
|
|
|
|
# If there is a delta we know that we've
|
|
|
|
# only added or replaced state, never
|
|
|
|
# removed keys entirely.
|
|
|
|
delta = DeltaState([], delta_ids)
|
|
|
|
elif current_state is not None:
|
|
|
|
with Measure(self._clock, "persist_events.calculate_state_delta"):
|
|
|
|
delta = await self._calculate_state_delta(room_id, current_state)
|
|
|
|
|
|
|
|
if delta:
|
|
|
|
# If we have a change of state then let's check
|
|
|
|
# whether we're actually still a member of the room,
|
|
|
|
# or if our last user left. If we're no longer in
|
|
|
|
# the room then we delete the current state and
|
|
|
|
# extremities.
|
|
|
|
is_still_joined = await self._is_server_still_joined(
|
|
|
|
room_id,
|
|
|
|
ev_ctx_rm,
|
|
|
|
delta,
|
|
|
|
)
|
|
|
|
if not is_still_joined:
|
|
|
|
logger.info("Server no longer in room %s", room_id)
|
|
|
|
delta.no_longer_in_room = True
|
|
|
|
|
|
|
|
return (new_forward_extremities, delta)
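# Illustrative sketch (not part of this module): the delta decision above, in
# miniature. `_Delta` and `diff` are hypothetical stand-ins for DeltaState
# and _calculate_state_delta. A pre-computed delta is preferred, since it is
# known to only add or replace state; otherwise the full current state, when
# available, is diffed against what the database holds.
from typing import Callable, Dict, List, Optional, Tuple

import attr

StateKey = Tuple[str, str]

@attr.s(auto_attribs=True, slots=True)
class _Delta:
    to_delete: List[StateKey]
    to_insert: Dict[StateKey, str]

def choose_delta(
    delta_ids: Optional[Dict[StateKey, str]],
    current_state: Optional[Dict[StateKey, str]],
    diff: Callable[[Dict[StateKey, str]], _Delta],
) -> Optional[_Delta]:
    if delta_ids is not None:
        # Known delta: only additions/replacements, never removals.
        return _Delta(to_delete=[], to_insert=delta_ids)
    if current_state is not None:
        # Full state known: compute to_delete/to_insert against the DB.
        return diff(current_state)
    return None  # No state change at all.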
|
|
|
|
|
2020-01-20 13:07:20 -05:00
|
|
|
async def _calculate_new_extremities(
|
|
|
|
self,
|
|
|
|
room_id: str,
|
2020-07-28 16:09:53 -04:00
|
|
|
event_contexts: List[Tuple[EventBase, EventContext]],
|
2023-09-18 09:29:05 -04:00
|
|
|
latest_event_ids: AbstractSet[str],
|
2022-02-25 05:19:49 -05:00
|
|
|
) -> Set[str]:
|
2019-10-23 07:00:21 -04:00
|
|
|
"""Calculates the new forward extremities for a room given events to
|
|
|
|
persist.
|
|
|
|
|
|
|
|
Assumes that we are only persisting events for one room at a time.
|
|
|
|
"""
|
|
|
|
|
|
|
|
# we're only interested in new events which aren't outliers and which aren't
|
|
|
|
# being rejected.
|
|
|
|
new_events = [
|
|
|
|
event
|
|
|
|
for event, ctx in event_contexts
|
|
|
|
if not event.internal_metadata.is_outlier()
|
|
|
|
and not ctx.rejected
|
|
|
|
and not event.internal_metadata.is_soft_failed()
|
|
|
|
]
|
|
|
|
|
|
|
|
# start with the existing forward extremities
|
|
|
|
result = set(latest_event_ids)
|
|
|
|
|
|
|
|
# add all the new events to the list
|
|
|
|
result.update(event.event_id for event in new_events)
|
|
|
|
|
|
|
|
# Now remove all events which are prev_events of any of the new events
|
|
|
|
result.difference_update(
|
|
|
|
e_id for event in new_events for e_id in event.prev_event_ids()
|
|
|
|
)
|
|
|
|
|
|
|
|
# Remove any events which are prev_events of any existing events.
|
2024-09-02 07:39:04 -04:00
|
|
|
existing_prevs: Collection[
|
|
|
|
str
|
|
|
|
] = await self.persist_events_store._get_events_which_are_prevs(result)
|
2019-10-23 07:00:21 -04:00
|
|
|
result.difference_update(existing_prevs)
|
|
|
|
|
|
|
|
# Finally handle the case where the new events have soft-failed prev
|
|
|
|
# events. If they do we need to remove them and their prev events,
|
|
|
|
# otherwise we end up with dangling extremities.
|
2020-05-13 08:38:22 -04:00
|
|
|
existing_prevs = await self.persist_events_store._get_prevs_before_rejected(
|
2019-10-23 07:00:21 -04:00
|
|
|
e_id for event in new_events for e_id in event.prev_event_ids()
|
|
|
|
)
|
|
|
|
result.difference_update(existing_prevs)
|
|
|
|
|
|
|
|
# We only update metrics for events that change forward extremities
|
|
|
|
# (e.g. we ignore backfill/outliers/etc)
|
|
|
|
if result != latest_event_ids:
|
|
|
|
forward_extremities_counter.observe(len(result))
|
|
|
|
stale = latest_event_ids & result
|
|
|
|
stale_forward_extremities_counter.observe(len(stale))
|
|
|
|
|
|
|
|
return result
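# Illustrative sketch (not part of this module): the extremity arithmetic
# above on plain sets, leaving aside the soft-failure and existing-prev
# handling. New extremities are the old ones plus the new events, minus
# anything referenced as a prev_event of a new event.
from typing import Dict, List, Set

def new_extremities(
    old: Set[str], new_events: List[str], prevs: Dict[str, Set[str]]
) -> Set[str]:
    result = set(old)
    result.update(new_events)
    for ev in new_events:
        # Anything that is now someone's prev_event stops being an extremity.
        result.difference_update(prevs.get(ev, set()))
    return result

# e.g. new_extremities({"$a"}, ["$b"], {"$b": {"$a"}}) == {"$b"}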
|
|
|
|
|
2020-01-20 13:07:20 -05:00
|
|
|
async def _get_new_state_after_events(
|
|
|
|
self,
|
|
|
|
room_id: str,
|
2020-07-28 16:09:53 -04:00
|
|
|
events_context: List[Tuple[EventBase, EventContext]],
|
2023-09-18 09:29:05 -04:00
|
|
|
old_latest_event_ids: AbstractSet[str],
|
2020-12-18 04:49:18 -05:00
|
|
|
new_latest_event_ids: Set[str],
|
|
|
|
) -> Tuple[Optional[StateMap[str]], Optional[StateMap[str]], Set[str]]:
|
2019-10-23 07:00:21 -04:00
|
|
|
"""Calculate the current state dict after adding some new events to
|
|
|
|
a room
|
|
|
|
|
|
|
|
Args:
|
2020-12-18 04:49:18 -05:00
|
|
|
room_id:
|
2019-10-23 07:00:21 -04:00
|
|
|
room to which the events are being added. Used for logging etc
|
|
|
|
|
2020-12-18 04:49:18 -05:00
|
|
|
events_context:
|
2019-10-23 07:00:21 -04:00
|
|
|
events and contexts which are being added to the room
|
|
|
|
|
2020-12-18 04:49:18 -05:00
|
|
|
old_latest_event_ids:
|
2019-10-23 07:00:21 -04:00
|
|
|
the old forward extremities for the room.
|
|
|
|
|
2020-12-18 04:49:18 -05:00
|
|
|
new_latest_event_ids:
|
2019-10-23 07:00:21 -04:00
|
|
|
the new forward extremities for the room.
|
|
|
|
|
|
|
|
Returns:
|
2020-12-18 04:49:18 -05:00
|
|
|
Returns a tuple of two state maps and a set of new forward
|
|
|
|
extremities.
|
|
|
|
|
|
|
|
The first state map is the full new current state and the second
|
|
|
|
is the delta to the existing current state. If both are None then
|
2022-05-10 15:43:13 -04:00
|
|
|
there has been no change. If there has been a change, either of
|
|
|
|
them (or neither) may be None.
|
2020-12-18 04:49:18 -05:00
|
|
|
|
|
|
|
The function may prune some old entries from the set of new
|
|
|
|
forward extremities if it's safe to do so.
|
2019-10-23 07:00:21 -04:00
|
|
|
|
|
|
|
If there has been a change then we only return the delta if it's
|
|
|
|
already been calculated. Conversely if we do know the delta then
|
|
|
|
the new current state is only returned if we've already calculated
|
|
|
|
it.
|
|
|
|
"""
|
|
|
|
# Map from (prev state group, new state group) -> delta state dict
|
|
|
|
state_group_deltas = {}
|
|
|
|
|
|
|
|
for ev, ctx in events_context:
|
|
|
|
if ctx.state_group is None:
|
|
|
|
# This should only happen for outlier events.
|
|
|
|
if not ev.internal_metadata.is_outlier():
|
|
|
|
raise Exception(
|
|
|
|
"Context for new event %s has no state "
|
|
|
|
"group" % (ev.event_id,)
|
|
|
|
)
|
|
|
|
continue
|
2023-06-13 16:22:06 -04:00
|
|
|
if ctx.state_group_deltas:
|
|
|
|
state_group_deltas.update(ctx.state_group_deltas)
|
2019-10-23 07:00:21 -04:00
|
|
|
|
|
|
|
# We need to map the event_ids to their state groups. First, let's
|
|
|
|
# check if the event is one we're persisting, in which case we can
|
|
|
|
# pull the state group from its context.
|
|
|
|
# Otherwise we need to pull the state group from the database.
|
|
|
|
|
|
|
|
# Set of events we need to fetch groups for. (We know none of the old
|
|
|
|
# extremities are going to be in events_context).
|
|
|
|
missing_event_ids = set(old_latest_event_ids)
|
|
|
|
|
|
|
|
event_id_to_state_group = {}
|
|
|
|
for event_id in new_latest_event_ids:
|
|
|
|
# First search in the list of new events we're adding.
|
|
|
|
for ev, ctx in events_context:
|
|
|
|
if event_id == ev.event_id and ctx.state_group is not None:
|
|
|
|
event_id_to_state_group[event_id] = ctx.state_group
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
# If we couldn't find it, then we'll need to pull
|
|
|
|
# the state from the database
|
|
|
|
missing_event_ids.add(event_id)
|
|
|
|
|
|
|
|
if missing_event_ids:
|
|
|
|
# Now pull out the state groups for any missing events from DB
|
2020-01-20 13:07:20 -05:00
|
|
|
event_to_groups = await self.main_store._get_state_group_for_events(
|
2019-10-23 07:00:21 -04:00
|
|
|
missing_event_ids
|
|
|
|
)
|
|
|
|
event_id_to_state_group.update(event_to_groups)
|
|
|
|
|
|
|
|
# State groups of old_latest_event_ids
|
2020-02-21 07:15:07 -05:00
|
|
|
old_state_groups = {
|
2019-10-23 07:00:21 -04:00
|
|
|
event_id_to_state_group[evid] for evid in old_latest_event_ids
|
2020-02-21 07:15:07 -05:00
|
|
|
}
|
2019-10-23 07:00:21 -04:00
|
|
|
|
|
|
|
# State groups of new_latest_event_ids
|
2020-02-21 07:15:07 -05:00
|
|
|
new_state_groups = {
|
2019-10-23 07:00:21 -04:00
|
|
|
event_id_to_state_group[evid] for evid in new_latest_event_ids
|
2020-02-21 07:15:07 -05:00
|
|
|
}
|
2019-10-23 07:00:21 -04:00
|
|
|
|
|
|
|
# If the old and new groups are the same then we don't need to do
|
|
|
|
# anything.
|
|
|
|
if old_state_groups == new_state_groups:
|
2020-12-18 04:49:18 -05:00
|
|
|
return None, None, new_latest_event_ids
|
2019-10-23 07:00:21 -04:00
|
|
|
|
|
|
|
if len(new_state_groups) == 1 and len(old_state_groups) == 1:
|
|
|
|
# If we're going from one state group to another, let's check if
|
|
|
|
# we have a delta for that transition. If we do then we can just
|
|
|
|
# return that.
|
|
|
|
|
|
|
|
new_state_group = next(iter(new_state_groups))
|
|
|
|
old_state_group = next(iter(old_state_groups))
|
|
|
|
|
|
|
|
delta_ids = state_group_deltas.get((old_state_group, new_state_group), None)
|
|
|
|
if delta_ids is not None:
|
|
|
|
# We have a delta from the existing to new current state,
|
2022-05-10 15:43:13 -04:00
|
|
|
# so let's just return that.
|
|
|
|
return None, delta_ids, new_latest_event_ids
|
2019-10-23 07:00:21 -04:00
|
|
|
|
|
|
|
# Now that we have calculated new_state_groups we need to get
|
|
|
|
# their state IDs so we can resolve to a single state set.
|
2022-05-10 15:43:13 -04:00
|
|
|
state_groups_map = await self.state_store._get_state_for_groups(
|
|
|
|
new_state_groups
|
|
|
|
)
|
2019-10-23 07:00:21 -04:00
|
|
|
|
|
|
|
if len(new_state_groups) == 1:
|
|
|
|
# If there is only one state group, then we know what the current
|
|
|
|
# state is.
|
2020-12-18 04:49:18 -05:00
|
|
|
return state_groups_map[new_state_groups.pop()], None, new_latest_event_ids
|
2019-10-23 07:00:21 -04:00
|
|
|
|
|
|
|
# Ok, we need to defer to the state handler to resolve our state sets.
|
|
|
|
|
|
|
|
state_groups = {sg: state_groups_map[sg] for sg in new_state_groups}
|
|
|
|
|
|
|
|
events_map = {ev.event_id: ev for ev, _ in events_context}
|
|
|
|
|
|
|
|
# We need to get the room version, which is in the create event.
|
|
|
|
# Normally that'd be in the database, but it's also possible that we're
|
|
|
|
# currently trying to persist it.
|
|
|
|
room_version = None
|
|
|
|
for ev, _ in events_context:
|
|
|
|
if ev.type == EventTypes.Create and ev.state_key == "":
|
|
|
|
room_version = ev.content.get("room_version", "1")
|
|
|
|
break
|
|
|
|
|
|
|
|
if not room_version:
|
2020-01-31 05:06:21 -05:00
|
|
|
room_version = await self.main_store.get_room_version_id(room_id)
|
2019-10-23 07:00:21 -04:00
|
|
|
|
|
|
|
logger.debug("calling resolve_state_groups from preserve_events")
|
2020-07-24 10:59:51 -04:00
|
|
|
|
|
|
|
# Avoid a circular import.
|
|
|
|
from synapse.state import StateResolutionStore
|
|
|
|
|
2020-01-20 13:07:20 -05:00
|
|
|
res = await self._state_resolution_handler.resolve_state_groups(
|
2019-10-23 07:00:21 -04:00
|
|
|
room_id,
|
|
|
|
room_version,
|
|
|
|
state_groups,
|
|
|
|
events_map,
|
|
|
|
state_res_store=StateResolutionStore(self.main_store),
|
|
|
|
)
|
|
|
|
|
2020-12-18 04:49:18 -05:00
|
|
|
state_resolutions_during_persistence.inc()
|
|
|
|
|
|
|
|
# If the returned state matches the state group of one of the new
|
|
|
|
# forward extremities then we check if we are able to prune some of the
|
|
|
|
# extremities.
|
|
|
|
if res.state_group and res.state_group in new_state_groups:
|
|
|
|
new_latest_event_ids = await self._prune_extremities(
|
|
|
|
room_id,
|
|
|
|
new_latest_event_ids,
|
|
|
|
res.state_group,
|
|
|
|
event_id_to_state_group,
|
|
|
|
events_context,
|
|
|
|
)
|
|
|
|
|
2022-07-15 07:06:41 -04:00
|
|
|
full_state = await res.get_state(self._state_controller)
|
|
|
|
return full_state, None, new_latest_event_ids
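# Illustrative sketch (not part of this module): the delta-reuse shortcut
# above. When persisting moves a room from exactly one old state group to
# exactly one new one, a pre-computed (old, new) delta lets us skip state
# resolution entirely.
from typing import Dict, Optional, Set, Tuple

def reusable_delta(
    old_groups: Set[int],
    new_groups: Set[int],
    deltas: Dict[Tuple[int, int], Dict[Tuple[str, str], str]],
) -> Optional[Dict[Tuple[str, str], str]]:
    if len(old_groups) == 1 and len(new_groups) == 1:
        return deltas.get((next(iter(old_groups)), next(iter(new_groups))))
    return None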
|
2020-12-18 04:49:18 -05:00
|
|
|
|
|
|
|
async def _prune_extremities(
|
|
|
|
self,
|
|
|
|
room_id: str,
|
|
|
|
new_latest_event_ids: Set[str],
|
|
|
|
resolved_state_group: int,
|
|
|
|
event_id_to_state_group: Dict[str, int],
|
|
|
|
events_context: List[Tuple[EventBase, EventContext]],
|
|
|
|
) -> Set[str]:
|
|
|
|
"""See if we can prune any of the extremities after calculating the
|
|
|
|
resolved state.
|
|
|
|
"""
|
|
|
|
potential_times_prune_extremities.inc()
|
|
|
|
|
|
|
|
# We keep all the extremities that have the same state group, and
|
|
|
|
# see if we can drop the others.
|
|
|
|
new_new_extrems = {
|
|
|
|
e
|
|
|
|
for e in new_latest_event_ids
|
|
|
|
if event_id_to_state_group[e] == resolved_state_group
|
|
|
|
}
|
|
|
|
|
|
|
|
dropped_extrems = set(new_latest_event_ids) - new_new_extrems
|
|
|
|
|
|
|
|
logger.debug("Might drop extremities: %s", dropped_extrems)
|
|
|
|
|
|
|
|
# We only drop events from the extremities list if:
|
|
|
|
# 1. we're not currently persisting them;
|
|
|
|
# 2. they're not our own events (or are dummy events); and
|
|
|
|
# 3. they're either:
|
|
|
|
# 1. over N hours old and more than N events ago (we use depth to
|
|
|
|
# calculate); or
|
|
|
|
# 2. we are persisting an event from the same domain and more than
|
|
|
|
# M events ago.
|
|
|
|
#
|
|
|
|
# The idea is that we don't want to drop events that are "legitimate"
|
|
|
|
# extremities (that we would want to include as prev events), only
|
|
|
|
# "stuck" extremities that are e.g. due to a gap in the graph.
|
|
|
|
#
|
|
|
|
# Note that we either drop all of them or none of them. If we only drop
|
|
|
|
# some of the events we don't know if state res would come to the same
|
|
|
|
# conclusion.
|
|
|
|
|
|
|
|
for ev, _ in events_context:
|
|
|
|
if ev.event_id in dropped_extrems:
|
|
|
|
logger.debug(
|
|
|
|
"Not dropping extremities: %s is being persisted", ev.event_id
|
|
|
|
)
|
|
|
|
return new_latest_event_ids
|
|
|
|
|
|
|
|
dropped_events = await self.main_store.get_events(
|
|
|
|
dropped_extrems,
|
|
|
|
allow_rejected=True,
|
2022-05-04 07:26:11 -04:00
|
|
|
redact_behaviour=EventRedactBehaviour.as_is,
|
2020-12-18 04:49:18 -05:00
|
|
|
)
|
|
|
|
|
|
|
|
new_senders = {get_domain_from_id(e.sender) for e, _ in events_context}
|
|
|
|
|
|
|
|
one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000
|
|
|
|
current_depth = max(e.depth for e, _ in events_context)
|
|
|
|
for event in dropped_events.values():
|
|
|
|
# If the event is a local dummy event then we should check it
|
|
|
|
# doesn't reference any local events, as we want to reference those
|
|
|
|
# if we send any new events.
|
|
|
|
#
|
|
|
|
# Note we do this recursively to handle the case where a dummy event
|
|
|
|
# references a dummy event that only references remote events.
|
|
|
|
#
|
|
|
|
# Ideally we'd figure out a way of still being able to drop old
|
|
|
|
# dummy events that reference local events, but this is good enough
|
|
|
|
# as a first cut.
|
2022-02-25 05:19:49 -05:00
|
|
|
events_to_check: Collection[EventBase] = [event]
|
2020-12-18 04:49:18 -05:00
|
|
|
while events_to_check:
|
2022-02-25 05:19:49 -05:00
|
|
|
new_events: Set[str] = set()
|
2020-12-18 04:49:18 -05:00
|
|
|
for event_to_check in events_to_check:
|
|
|
|
if self.is_mine_id(event_to_check.sender):
|
|
|
|
if event_to_check.type != EventTypes.Dummy:
|
|
|
|
logger.debug("Not dropping own event")
|
|
|
|
return new_latest_event_ids
|
|
|
|
new_events.update(event_to_check.prev_event_ids())
|
|
|
|
|
|
|
|
prev_events = await self.main_store.get_events(
|
|
|
|
new_events,
|
|
|
|
allow_rejected=True,
|
2022-05-04 07:26:11 -04:00
|
|
|
redact_behaviour=EventRedactBehaviour.as_is,
|
2020-12-18 04:49:18 -05:00
|
|
|
)
|
|
|
|
events_to_check = prev_events.values()
|
|
|
|
|
|
|
|
if (
|
|
|
|
event.origin_server_ts < one_day_ago
|
|
|
|
and event.depth < current_depth - 100
|
|
|
|
):
|
|
|
|
continue
|
|
|
|
|
|
|
|
# We can be less conservative about dropping extremities from the
|
|
|
|
# same domain, though we do want to wait a little bit (otherwise
|
|
|
|
# we'll immediately remove all extremities from a given server).
|
|
|
|
if (
|
|
|
|
get_domain_from_id(event.sender) in new_senders
|
|
|
|
and event.depth < current_depth - 20
|
|
|
|
):
|
|
|
|
continue
|
|
|
|
|
|
|
|
logger.debug(
|
|
|
|
"Not dropping as too new and not in new_senders: %s",
|
|
|
|
new_senders,
|
|
|
|
)
|
|
|
|
|
|
|
|
return new_latest_event_ids
|
|
|
|
|
|
|
|
times_pruned_extremities.inc()
|
|
|
|
|
|
|
|
logger.info(
|
|
|
|
"Pruning forward extremities in room %s: from %s -> %s",
|
|
|
|
room_id,
|
|
|
|
new_latest_event_ids,
|
|
|
|
new_new_extrems,
|
|
|
|
)
|
|
|
|
return new_new_extrems
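# Illustrative sketch (not part of this module): the drop policy above as a
# single predicate, leaving aside the dummy-event recursion. An extremity is
# only droppable when it is both old in wall-clock time and far behind in
# depth, or when its domain is still actively sending events and it is
# somewhat behind. The thresholds mirror the ones used above.
from typing import Set

def droppable(
    origin_server_ts: int,
    depth: int,
    sender_domain: str,
    now_ms: int,
    current_depth: int,
    new_senders: Set[str],
) -> bool:
    one_day_ago = now_ms - 24 * 60 * 60 * 1000
    if origin_server_ts < one_day_ago and depth < current_depth - 100:
        return True
    # Same-domain extremities can be dropped sooner, but not immediately.
    return sender_domain in new_senders and depth < current_depth - 20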
|
2019-10-23 07:00:21 -04:00
|
|
|
|
2020-01-20 13:07:20 -05:00
|
|
|
async def _calculate_state_delta(
|
|
|
|
self, room_id: str, current_state: StateMap[str]
|
|
|
|
) -> DeltaState:
|
2019-10-23 07:00:21 -04:00
|
|
|
"""Calculate the new state deltas for a room.
|
|
|
|
|
|
|
|
Assumes that we are only persisting events for one room at a time.
|
|
|
|
"""
|
2022-06-01 11:02:53 -04:00
|
|
|
existing_state = await self.main_store.get_partial_current_state_ids(room_id)
|
2019-10-23 07:00:21 -04:00
|
|
|
|
|
|
|
to_delete = [key for key in existing_state if key not in current_state]
|
|
|
|
|
|
|
|
to_insert = {
|
|
|
|
key: ev_id
|
2020-06-15 07:03:36 -04:00
|
|
|
for key, ev_id in current_state.items()
|
2019-10-23 07:00:21 -04:00
|
|
|
if ev_id != existing_state.get(key)
|
|
|
|
}
|
|
|
|
|
2020-01-20 13:07:20 -05:00
|
|
|
return DeltaState(to_delete=to_delete, to_insert=to_insert)
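# Illustrative sketch (not part of this module): the state diff above on
# plain dicts. Keys present only in the existing state are deleted; keys
# whose event ID changed (or which are new) are inserted.
from typing import Dict, List, Tuple

StateKey = Tuple[str, str]

def state_delta(
    existing: Dict[StateKey, str], current: Dict[StateKey, str]
) -> Tuple[List[StateKey], Dict[StateKey, str]]:
    to_delete = [key for key in existing if key not in current]
    to_insert = {
        key: ev_id
        for key, ev_id in current.items()
        if ev_id != existing.get(key)
    }
    return to_delete, to_insert

# e.g. state_delta({("m.room.name", ""): "$1"}, {("m.room.name", ""): "$2"})
# == ([], {("m.room.name", ""): "$2"})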
|
2020-01-29 06:01:32 -05:00
|
|
|
|
|
|
|
async def _is_server_still_joined(
|
|
|
|
self,
|
|
|
|
room_id: str,
|
2020-07-28 16:09:53 -04:00
|
|
|
ev_ctx_rm: List[Tuple[EventBase, EventContext]],
|
2020-01-29 06:01:32 -05:00
|
|
|
delta: DeltaState,
|
|
|
|
) -> bool:
|
|
|
|
"""Check if the server will still be joined after the given events have
|
|
|
|
been persisted.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
room_id: The room whose membership is being checked.
|
|
|
|
ev_ctx_rm: The events and contexts being persisted for the room.
|
|
|
|
delta: The delta of current state between what is in the database
|
|
|
|
and what the new current state will be.
|
|
|
|
"""
|
|
|
|
|
|
|
|
if not any(
|
|
|
|
self.is_mine_id(state_key)
|
|
|
|
for typ, state_key in itertools.chain(delta.to_delete, delta.to_insert)
|
|
|
|
if typ == EventTypes.Member
|
|
|
|
):
|
|
|
|
# There have been no changes to membership of our users, so nothing
|
|
|
|
# has changed and we assume we're still in the room.
|
|
|
|
return True
|
|
|
|
|
|
|
|
# Check if any of the given events are a local join that appear in the
|
|
|
|
# current state
|
2020-02-19 05:15:49 -05:00
|
|
|
events_to_check = [] # Event IDs that aren't events we're persisting
|
2020-01-29 06:01:32 -05:00
|
|
|
for (typ, state_key), event_id in delta.to_insert.items():
|
|
|
|
if typ != EventTypes.Member or not self.is_mine_id(state_key):
|
|
|
|
continue
|
|
|
|
|
|
|
|
for event, _ in ev_ctx_rm:
|
|
|
|
if event_id == event.event_id:
|
|
|
|
if event.membership == Membership.JOIN:
|
|
|
|
return True
|
|
|
|
|
2020-02-19 05:15:49 -05:00
|
|
|
# The event is not in `ev_ctx_rm`, so we need to pull it out of
|
|
|
|
# the DB.
|
|
|
|
events_to_check.append(event_id)
|
|
|
|
|
|
|
|
# Check if any of the changes that we don't have events for are joins.
|
|
|
|
if events_to_check:
|
2022-03-25 10:58:56 -04:00
|
|
|
members = await self.main_store.get_membership_from_event_ids(
|
|
|
|
events_to_check
|
|
|
|
)
|
|
|
|
is_still_joined = any(
|
|
|
|
member and member.membership == Membership.JOIN
|
|
|
|
for member in members.values()
|
|
|
|
)
|
2020-02-19 05:15:49 -05:00
|
|
|
if is_still_joined:
|
|
|
|
return True
|
|
|
|
|
|
|
|
# None of the new state events are local joins, so we check the database
|
|
|
|
# to see if there are any other local users in the room. We ignore users
|
|
|
|
# whose state has changed as we've already checked their new state above.
|
|
|
|
users_to_ignore = [
|
|
|
|
state_key
|
2020-05-21 08:20:10 -04:00
|
|
|
for typ, state_key in itertools.chain(delta.to_insert, delta.to_delete)
|
|
|
|
if typ == EventTypes.Member and self.is_mine_id(state_key)
|
2020-02-19 05:15:49 -05:00
|
|
|
]
|
|
|
|
|
|
|
|
if await self.main_store.is_local_host_in_room_ignoring_users(
|
|
|
|
room_id, users_to_ignore
|
|
|
|
):
|
|
|
|
return True
|
|
|
|
|
2020-01-30 11:10:30 -05:00
|
|
|
return False
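# Illustrative sketch (not part of this module): the first check above. If no
# local user's membership appears anywhere in the delta, the server's join
# status cannot have changed. "m.room.member" is the value of
# EventTypes.Member.
from typing import Callable, Iterable, Tuple

def local_membership_changed(
    delta_keys: Iterable[Tuple[str, str]], is_mine: Callable[[str], bool]
) -> bool:
    return any(
        is_mine(state_key)
        for typ, state_key in delta_keys
        if typ == "m.room.member"
    )

# e.g. local_membership_changed([("m.room.member", "@u:example.org")],
#                               lambda u: u.endswith(":example.org")) is True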
|