# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools
import logging
from collections import OrderedDict
from typing import (
    TYPE_CHECKING,
    Any,
    Collection,
    Dict,
    Generator,
    Iterable,
    List,
    Optional,
    Sequence,
    Set,
    Tuple,
)

import attr
from prometheus_client import Counter

import synapse.metrics
from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext
from synapse.storage._base import db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
)
from synapse.storage.databases.main.events_worker import EventCacheEntry
from synapse.storage.databases.main.search import SearchEntry
from synapse.storage.engines.postgres import PostgresEngine
from synapse.storage.util.id_generators import AbstractStreamIdGenerator
from synapse.storage.util.sequence import SequenceGenerator
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.util import json_encoder
from synapse.util.iterutils import batch_iter, sorted_topologically
from synapse.util.stringutils import non_null_str_or_none

if TYPE_CHECKING:
    from synapse.server import HomeServer
    from synapse.storage.databases.main import DataStore


logger = logging.getLogger(__name__)

persist_event_counter = Counter("synapse_storage_events_persisted_events", "")
event_counter = Counter(
    "synapse_storage_events_persisted_events_sep",
    "",
    ["type", "origin_type", "origin_entity"],
)


@attr.s(slots=True, auto_attribs=True)
class DeltaState:
    """Deltas to use to update the `current_state_events` table.

    Attributes:
        to_delete: List of type/state_keys to delete from current state
        to_insert: Map of state to upsert into current state
        no_longer_in_room: The server is no longer in the room, so the room
            should e.g. be removed from `current_state_events` table.
    """

    to_delete: List[Tuple[str, str]]
    to_insert: StateMap[str]
    no_longer_in_room: bool = False
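

# An illustrative DeltaState (values are made up): deleting Alice's membership
# row and upserting a new topic event would be expressed as
#   DeltaState(
#       to_delete=[(EventTypes.Member, "@alice:example.com")],
#       to_insert={(EventTypes.Topic, ""): "$new_topic_event_id"},
#   )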


class PersistEventsStore:
    """Contains all the functions for writing events to the database.

    Should only be instantiated on one process (when using a worker mode setup).

    Note: This is not part of the `DataStore` mixin.
    """

    def __init__(
        self,
        hs: "HomeServer",
        db: DatabasePool,
        main_data_store: "DataStore",
        db_conn: LoggingDatabaseConnection,
    ):
        self.hs = hs
        self.db_pool = db
        self.store = main_data_store
        self.database_engine = db.engine
        self._clock = hs.get_clock()
        self._instance_name = hs.get_instance_name()

        self._ephemeral_messages_enabled = hs.config.server.enable_ephemeral_messages
        self.is_mine_id = hs.is_mine_id

        # This should only exist on instances that are configured to write.
        assert (
            hs.get_instance_name() in hs.config.worker.writers.events
        ), "Can only instantiate EventsStore on master"

        # Since we have been configured to write, we ought to have id generators,
        # rather than id trackers.
        assert isinstance(self.store._backfill_id_gen, AbstractStreamIdGenerator)
        assert isinstance(self.store._stream_id_gen, AbstractStreamIdGenerator)

        # Ideally we'd move these ID gens here, unfortunately some other ID
        # generators are chained off them so doing so is a bit of a PITA.
        self._backfill_id_gen: AbstractStreamIdGenerator = self.store._backfill_id_gen
        self._stream_id_gen: AbstractStreamIdGenerator = self.store._stream_id_gen

    async def _persist_events_and_state_updates(
        self,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
        *,
        state_delta_for_room: Dict[str, DeltaState],
        new_forward_extremities: Dict[str, Set[str]],
        use_negative_stream_ordering: bool = False,
        inhibit_local_membership_updates: bool = False,
    ) -> None:
        """Persist a set of events alongside updates to the current state and
        forward extremities tables.

        Args:
            events_and_contexts:
            state_delta_for_room: Map from room_id to the delta to apply to
                room state
            new_forward_extremities: Map from room_id to set of event IDs
                that are the new forward extremities of the room.
            use_negative_stream_ordering: Whether to start stream_ordering on
                the negative side and decrement. This should be set as True
                for backfilled events because backfilled events get a negative
                stream ordering so they don't come down incremental `/sync`.
            inhibit_local_membership_updates: Stop the local_current_membership
                from being updated by these events. This should be set to True
                for backfilled events because backfilled events in the past do
                not affect the current local state.

        Returns:
            Resolves when the events have been persisted
        """

        # We want to calculate the stream orderings as late as possible, as
        # we only notify after all events with a lesser stream ordering have
        # been persisted. I.e. if we spend 10s inside the with block then
        # that will delay all subsequent events from being notified about.
        # Hence why we do it down here rather than wrapping the entire
        # function.
        #
        # It's safe to do this after calculating the state deltas etc as we
        # only need to protect the *persistence* of the events. This is to
        # ensure that queries of the form "fetch events since X" don't
        # return events and stream positions after events that are still in
        # flight, as otherwise subsequent requests "fetch events since Y"
        # will not return those events.
        #
        # Note: Multiple instances of this function cannot be in flight at
        # the same time for the same room.
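        #
        # Concretely (illustrative numbers): if this call reserved orderings
        # 100..110 and then spent ten seconds on the work above, an event
        # persisted elsewhere at ordering 111 could not be notified until ours
        # completed; acquiring the orderings this late keeps that blocking
        # window as small as possible.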
        if use_negative_stream_ordering:
            stream_ordering_manager = self._backfill_id_gen.get_next_mult(
                len(events_and_contexts)
            )
        else:
            stream_ordering_manager = self._stream_id_gen.get_next_mult(
                len(events_and_contexts)
            )

        async with stream_ordering_manager as stream_orderings:
            for (event, _), stream in zip(events_and_contexts, stream_orderings):
                event.internal_metadata.stream_ordering = stream

            await self.db_pool.runInteraction(
                "persist_events",
                self._persist_events_txn,
                events_and_contexts=events_and_contexts,
                inhibit_local_membership_updates=inhibit_local_membership_updates,
                state_delta_for_room=state_delta_for_room,
                new_forward_extremities=new_forward_extremities,
            )
            persist_event_counter.inc(len(events_and_contexts))

            if not use_negative_stream_ordering:
                # we don't want to set the event_persisted_position to a negative
                # stream_ordering.
                synapse.metrics.event_persisted_position.set(stream)

            for event, context in events_and_contexts:
                if context.app_service:
                    origin_type = "local"
                    origin_entity = context.app_service.id
                elif self.hs.is_mine_id(event.sender):
                    origin_type = "local"
                    origin_entity = "*client*"
                else:
                    origin_type = "remote"
                    origin_entity = get_domain_from_id(event.sender)

                event_counter.labels(event.type, origin_type, origin_entity).inc()

            for room_id, latest_event_ids in new_forward_extremities.items():
                self.store.get_latest_event_ids_in_room.prefill(
                    (room_id,), list(latest_event_ids)
                )

    async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[str]:
        """Filter the supplied list of event_ids to get those which are prev_events of
        existing (non-outlier/rejected) events.

        Args:
            event_ids: event ids to filter

        Returns:
            Filtered event ids
        """
        results: List[str] = []

        def _get_events_which_are_prevs_txn(
            txn: LoggingTransaction, batch: Collection[str]
        ) -> None:
            sql = """
            SELECT prev_event_id, internal_metadata
            FROM event_edges
                INNER JOIN events USING (event_id)
                LEFT JOIN rejections USING (event_id)
                LEFT JOIN event_json USING (event_id)
            WHERE
                NOT events.outlier
                AND rejections.event_id IS NULL
                AND
            """

            clause, args = make_in_list_sql_clause(
                self.database_engine, "prev_event_id", batch
            )

            txn.execute(sql + clause, args)
            results.extend(r[0] for r in txn if not db_to_json(r[1]).get("soft_failed"))

        for chunk in batch_iter(event_ids, 100):
            await self.db_pool.runInteraction(
                "_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk
            )

        return results

    async def _get_prevs_before_rejected(self, event_ids: Iterable[str]) -> Set[str]:
        """Get soft-failed ancestors to remove from the extremities.

        Given a set of events, find all those that have been soft-failed or
        rejected. Returns those soft failed/rejected events and their prev
        events (whether soft-failed/rejected or not), and recurses up the
        prev-event graph until it finds no more soft-failed/rejected events.

        This is used to find extremities that are ancestors of new events, but
        are separated by soft failed events.

        Args:
            event_ids: Events to find prev events for. Note that these must have
                already been persisted.

        Returns:
            The previous events.
        """

        # The set of event_ids to return. This includes all soft-failed events
        # and their prev events.
        existing_prevs: Set[str] = set()

        def _get_prevs_before_rejected_txn(
            txn: LoggingTransaction, batch: Collection[str]
        ) -> None:
            to_recursively_check = batch

            while to_recursively_check:
                sql = """
                SELECT
                    event_id, prev_event_id, internal_metadata,
                    rejections.event_id IS NOT NULL
                FROM event_edges
                    INNER JOIN events USING (event_id)
                    LEFT JOIN rejections USING (event_id)
                    LEFT JOIN event_json USING (event_id)
                WHERE
                    NOT events.outlier
                    AND
                """

                clause, args = make_in_list_sql_clause(
                    self.database_engine, "event_id", to_recursively_check
                )

                txn.execute(sql + clause, args)
                to_recursively_check = []

                for _, prev_event_id, metadata, rejected in txn:
                    if prev_event_id in existing_prevs:
                        continue

                    soft_failed = db_to_json(metadata).get("soft_failed")
                    if soft_failed or rejected:
                        to_recursively_check.append(prev_event_id)
                        existing_prevs.add(prev_event_id)

        for chunk in batch_iter(event_ids, 100):
            await self.db_pool.runInteraction(
                "_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk
            )

        return existing_prevs
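
    # Illustrative walk-through: starting from a persisted event E3 that is
    # soft-failed with prev_event E2, where E2 is rejected with prev_event E1
    # and E1 was accepted: E3 contributes E2 (E3 is soft-failed), E2
    # contributes E1 (E2 is rejected), and accepted E1 contributes nothing,
    # so the returned set is {E2, E1}.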

    def _persist_events_txn(
        self,
        txn: LoggingTransaction,
        *,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
        inhibit_local_membership_updates: bool = False,
        state_delta_for_room: Optional[Dict[str, DeltaState]] = None,
        new_forward_extremities: Optional[Dict[str, Set[str]]] = None,
    ) -> None:
        """Insert some number of room events into the necessary database tables.

        Rejected events are only inserted into the events table, the events_json table,
        and the rejections table. Things reading from those tables will need to check
        whether the event was rejected.

        Args:
            txn
            events_and_contexts: events to persist
            inhibit_local_membership_updates: Stop the local_current_membership
                from being updated by these events. This should be set to True
                for backfilled events because backfilled events in the past do
                not affect the current local state.
            state_delta_for_room: The current-state delta for each room.
            new_forward_extremities: The new forward extremities for each room.
                For each room, a list of the event ids which are the forward
                extremities.
        """
        state_delta_for_room = state_delta_for_room or {}
        new_forward_extremities = new_forward_extremities or {}

        all_events_and_contexts = events_and_contexts

        min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering
        max_stream_order = events_and_contexts[-1][0].internal_metadata.stream_ordering

        # We check that the room still exists for events we're trying to
        # persist. This is to protect against races with deleting a room.
        #
        # Annoyingly SQLite doesn't support row level locking.
        if isinstance(self.database_engine, PostgresEngine):
            for room_id in {e.room_id for e, _ in events_and_contexts}:
                txn.execute(
                    "SELECT room_version FROM rooms WHERE room_id = ? FOR SHARE",
                    (room_id,),
                )
                row = txn.fetchone()
                if row is None:
                    raise Exception(f"Room does not exist {room_id}")

        # stream orderings should have been assigned by now
        assert min_stream_order
        assert max_stream_order

        self._update_forward_extremities_txn(
            txn,
            new_forward_extremities=new_forward_extremities,
            max_stream_order=max_stream_order,
        )

        # Ensure that we don't have the same event twice.
        events_and_contexts = self._filter_events_and_contexts_for_duplicates(
            events_and_contexts
        )

        self._update_room_depths_txn(txn, events_and_contexts=events_and_contexts)

        # _update_outliers_txn filters out any events which have already been
        # persisted, and returns the filtered list.
        events_and_contexts = self._update_outliers_txn(
            txn, events_and_contexts=events_and_contexts
        )

        # From this point onwards the events are only events that we haven't
        # seen before.

        self._store_event_txn(txn, events_and_contexts=events_and_contexts)

        self._persist_transaction_ids_txn(txn, events_and_contexts)

        # Insert into event_to_state_groups.
        self._store_event_state_mappings_txn(txn, events_and_contexts)

        self._persist_event_auth_chain_txn(txn, [e for e, _ in events_and_contexts])

        # _store_rejected_events_txn filters out any events which were
        # rejected, and returns the filtered list.
        events_and_contexts = self._store_rejected_events_txn(
            txn, events_and_contexts=events_and_contexts
        )

        # From this point onwards the events are only ones that weren't
        # rejected.

        self._update_metadata_tables_txn(
            txn,
            events_and_contexts=events_and_contexts,
            all_events_and_contexts=all_events_and_contexts,
            inhibit_local_membership_updates=inhibit_local_membership_updates,
        )

        # We call this last as it assumes we've inserted the events into
        # room_memberships, where applicable.
        self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)

    def _persist_event_auth_chain_txn(
        self,
        txn: LoggingTransaction,
        events: List[EventBase],
    ) -> None:

        # We only care about state events, so bail early if there are no
        # state events to process.
        if not any(e.is_state() for e in events):
            return

        # We want to store event_auth mappings for rejected events, as they're
        # used in state res v2.
        # This is only necessary if the rejected event appears in an accepted
        # event's auth chain, but it's easier for now just to store them (and
        # it doesn't take much storage compared to storing the entire event
        # anyway).
        self.db_pool.simple_insert_many_txn(
            txn,
            table="event_auth",
            keys=("event_id", "room_id", "auth_id"),
            values=[
                (event.event_id, event.room_id, auth_id)
                for event in events
                for auth_id in event.auth_event_ids()
                if event.is_state()
            ],
        )

        # We now calculate chain ID/sequence numbers for any state events we're
        # persisting. We ignore out of band memberships as we're not in the room
        # and won't have their auth chain (we'll fix it up later if we join the
        # room).
        #
        # See: docs/auth_chain_difference_algorithm.md

        # We ignore legacy rooms that we aren't filling the chain cover index
        # for.
        rows = self.db_pool.simple_select_many_txn(
            txn,
            table="rooms",
            column="room_id",
            iterable={event.room_id for event in events if event.is_state()},
            keyvalues={},
            retcols=("room_id", "has_auth_chain_index"),
        )
        rooms_using_chain_index = {
            row["room_id"] for row in rows if row["has_auth_chain_index"]
        }

        state_events = {
            event.event_id: event
            for event in events
            if event.is_state() and event.room_id in rooms_using_chain_index
        }

        if not state_events:
            return

        # We need to know the type/state_key and auth events of the events we're
        # calculating chain IDs for. We don't rely on having the full Event
        # instances as we'll potentially be pulling more events from the DB and
        # we don't need the overhead of fetching/parsing the full event JSON.
        event_to_types = {
            e.event_id: (e.type, e.state_key) for e in state_events.values()
        }
        event_to_auth_chain = {
            e.event_id: e.auth_event_ids() for e in state_events.values()
        }
        event_to_room_id = {e.event_id: e.room_id for e in state_events.values()}

        self._add_chain_cover_index(
            txn,
            self.db_pool,
            self.store.event_chain_id_gen,
            event_to_room_id,
            event_to_types,
            event_to_auth_chain,
        )

    @classmethod
    def _add_chain_cover_index(
        cls,
        txn: LoggingTransaction,
        db_pool: DatabasePool,
        event_chain_id_gen: SequenceGenerator,
        event_to_room_id: Dict[str, str],
        event_to_types: Dict[str, Tuple[str, str]],
        event_to_auth_chain: Dict[str, Sequence[str]],
    ) -> None:
        """Calculate the chain cover index for the given events.

        Args:
            event_to_room_id: Event ID to the room ID of the event
            event_to_types: Event ID to type and state_key of the event
            event_to_auth_chain: Event ID to list of auth event IDs of the
                event (events with no auth events can be excluded).
        """

        # Map from event ID to chain ID/sequence number.
        chain_map: Dict[str, Tuple[int, int]] = {}

        # Set of event IDs to calculate chain ID/seq numbers for.
        events_to_calc_chain_id_for = set(event_to_room_id)

        # We check if there are any events that need to be handled in the rooms
        # we're looking at. These should just be out of band memberships, where
        # we didn't have the auth chain when we first persisted.
        rows = db_pool.simple_select_many_txn(
            txn,
            table="event_auth_chain_to_calculate",
            keyvalues={},
            column="room_id",
            iterable=set(event_to_room_id.values()),
            retcols=("event_id", "type", "state_key"),
        )
        for row in rows:
            event_id = row["event_id"]
            event_type = row["type"]
            state_key = row["state_key"]

            # (We could pull out the auth events for all rows at once using
            # simple_select_many, but this case happens rarely and almost always
            # with a single row.)
            auth_events = db_pool.simple_select_onecol_txn(
                txn,
                "event_auth",
                keyvalues={"event_id": event_id},
                retcol="auth_id",
            )

            events_to_calc_chain_id_for.add(event_id)
            event_to_types[event_id] = (event_type, state_key)
            event_to_auth_chain[event_id] = auth_events

        # First we get the chain ID and sequence numbers for the events'
        # auth events (that aren't also currently being persisted).
        #
        # Note that there is an edge case here where we might not have
        # calculated chains and sequence numbers for events that were "out
        # of band". We handle this case by fetching the necessary info and
        # adding it to the set of events to calculate chain IDs for.

        missing_auth_chains = {
            a_id
            for auth_events in event_to_auth_chain.values()
            for a_id in auth_events
            if a_id not in events_to_calc_chain_id_for
        }

        # We loop here in case we find an out of band membership and need to
        # fetch their auth event info.
        while missing_auth_chains:
            sql = """
                SELECT event_id, events.type, se.state_key, chain_id, sequence_number
                FROM events
                INNER JOIN state_events AS se USING (event_id)
                LEFT JOIN event_auth_chains USING (event_id)
                WHERE
            """
            clause, args = make_in_list_sql_clause(
                txn.database_engine,
                "event_id",
                missing_auth_chains,
            )
            txn.execute(sql + clause, args)

            missing_auth_chains.clear()

            for (
                auth_id,
                event_type,
                state_key,
                chain_id,
                sequence_number,
            ) in txn.fetchall():
                event_to_types[auth_id] = (event_type, state_key)

                if chain_id is None:
                    # No chain ID, so the event was persisted out of band.
                    # We add it to the list of events to calculate auth chains
                    # for.

                    events_to_calc_chain_id_for.add(auth_id)

                    event_to_auth_chain[auth_id] = db_pool.simple_select_onecol_txn(
                        txn,
                        "event_auth",
                        keyvalues={"event_id": auth_id},
                        retcol="auth_id",
                    )

                    missing_auth_chains.update(
                        e
                        for e in event_to_auth_chain[auth_id]
                        if e not in event_to_types
                    )
                else:
                    chain_map[auth_id] = (chain_id, sequence_number)

        # Now we check if we have any events where we don't have the auth
        # chain; these should only be out of band memberships.
        for event_id in sorted_topologically(event_to_auth_chain, event_to_auth_chain):
            for auth_id in event_to_auth_chain[event_id]:
                if (
                    auth_id not in chain_map
                    and auth_id not in events_to_calc_chain_id_for
                ):
                    events_to_calc_chain_id_for.discard(event_id)

                    # If this is an event we're trying to persist we add it to
                    # the list of events to calculate chain IDs for next time
                    # around. (Otherwise we will have already added it to the
                    # table).
                    room_id = event_to_room_id.get(event_id)
                    if room_id:
                        e_type, state_key = event_to_types[event_id]
                        db_pool.simple_insert_txn(
                            txn,
                            table="event_auth_chain_to_calculate",
                            values={
                                "event_id": event_id,
                                "room_id": room_id,
                                "type": e_type,
                                "state_key": state_key,
                            },
                        )

                    # We stop checking the event's auth events since we've
                    # discarded it.
                    break

        if not events_to_calc_chain_id_for:
            return

        # Allocate chain ID/sequence numbers to each new event.
        new_chain_tuples = cls._allocate_chain_ids(
            txn,
            db_pool,
            event_chain_id_gen,
            event_to_room_id,
            event_to_types,
            event_to_auth_chain,
            events_to_calc_chain_id_for,
            chain_map,
        )
        chain_map.update(new_chain_tuples)

        db_pool.simple_insert_many_txn(
            txn,
            table="event_auth_chains",
            keys=("event_id", "chain_id", "sequence_number"),
            values=[
                (event_id, c_id, seq)
                for event_id, (c_id, seq) in new_chain_tuples.items()
            ],
        )

        db_pool.simple_delete_many_txn(
            txn,
            table="event_auth_chain_to_calculate",
            keyvalues={},
            column="event_id",
            values=new_chain_tuples,
        )

        # Now we need to calculate any new links between chains caused by
        # the new events.
        #
        # Links are pairs of chain ID/sequence numbers such that for any
        # event A (CA, SA) and any event B (CB, SB), B is in A's auth chain
        # if and only if there is at least one link (CA, S1) -> (CB, S2)
        # where SA >= S1 and S2 >= SB.
        #
        # We try and avoid adding redundant links to the table, e.g. if we
        # have two links between two chains which both start/end at the
        # sequence number event (or cross) then one can be safely dropped.
        #
        # To calculate new links we look at every new event and:
        #   1. Fetch the chain ID/sequence numbers of its auth events,
        #      discarding any that are reachable by other auth events, or
        #      that have the same chain ID as the event.
        #   2. For each retained auth event we:
        #       a. Add a link from the event's to the auth event's chain
        #          ID/sequence number; and
        #       b. Add a link from the event to every chain reachable by the
        #          auth event.
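
        # Worked example (illustrative): suppose chain 1 holds events
        # e1 @ (1, 1) and e2 @ (1, 2), chain 2 holds e3 @ (2, 1), and we store
        # the single link (1, 1) -> (2, 1). Then e3 is in e2's auth chain
        # because SA = 2 >= S1 = 1 and S2 = 1 >= SB = 1; no separate link from
        # (1, 2) is needed, which is what keeps the links table small.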

        # Step 1, fetch all existing links from all the chains we've seen
        # referenced.
        chain_links = _LinkMap()
        rows = db_pool.simple_select_many_txn(
            txn,
            table="event_auth_chain_links",
            column="origin_chain_id",
            iterable={chain_id for chain_id, _ in chain_map.values()},
            keyvalues={},
            retcols=(
                "origin_chain_id",
                "origin_sequence_number",
                "target_chain_id",
                "target_sequence_number",
            ),
        )
        for row in rows:
            chain_links.add_link(
                (row["origin_chain_id"], row["origin_sequence_number"]),
                (row["target_chain_id"], row["target_sequence_number"]),
                new=False,
            )

        # We do this in topological order to avoid adding redundant links.
        for event_id in sorted_topologically(
            events_to_calc_chain_id_for, event_to_auth_chain
        ):
            chain_id, sequence_number = chain_map[event_id]

            # Filter out auth events that are reachable by other auth
            # events. We do this by looking at every permutation of pairs of
            # auth events (A, B) to check if B is reachable from A.
            reduction = {
                a_id
                for a_id in event_to_auth_chain.get(event_id, [])
                if chain_map[a_id][0] != chain_id
            }
            for start_auth_id, end_auth_id in itertools.permutations(
                event_to_auth_chain.get(event_id, []),
                r=2,
            ):
                if chain_links.exists_path_from(
                    chain_map[start_auth_id], chain_map[end_auth_id]
                ):
                    reduction.discard(end_auth_id)

            # Step 2, figure out what the new links are from the reduced
            # list of auth events.
            for auth_id in reduction:
                auth_chain_id, auth_sequence_number = chain_map[auth_id]

                # Step 2a, add link between the event and auth event
                chain_links.add_link(
                    (chain_id, sequence_number), (auth_chain_id, auth_sequence_number)
                )

                # Step 2b, add a link to chains reachable from the auth
                # event.
                for target_id, target_seq in chain_links.get_links_from(
                    (auth_chain_id, auth_sequence_number)
                ):
                    if target_id == chain_id:
                        continue

                    chain_links.add_link(
                        (chain_id, sequence_number), (target_id, target_seq)
                    )

        db_pool.simple_insert_many_txn(
            txn,
            table="event_auth_chain_links",
            keys=(
                "origin_chain_id",
                "origin_sequence_number",
                "target_chain_id",
                "target_sequence_number",
            ),
            values=[
                (source_id, source_seq, target_id, target_seq)
                for (
                    source_id,
                    source_seq,
                    target_id,
                    target_seq,
                ) in chain_links.get_additions()
            ],
        )

    @staticmethod
    def _allocate_chain_ids(
        txn: LoggingTransaction,
        db_pool: DatabasePool,
        event_chain_id_gen: SequenceGenerator,
        event_to_room_id: Dict[str, str],
        event_to_types: Dict[str, Tuple[str, str]],
        event_to_auth_chain: Dict[str, Sequence[str]],
        events_to_calc_chain_id_for: Set[str],
        chain_map: Dict[str, Tuple[int, int]],
    ) -> Dict[str, Tuple[int, int]]:
        """Allocates, but does not persist, chain ID/sequence numbers for the
        events in `events_to_calc_chain_id_for`. (c.f. _add_chain_cover_index
        for info on args)
        """

        # We now calculate the chain IDs/sequence numbers for the events. We do
        # this by looking at the chain ID and sequence number of any auth event
        # with the same type/state_key and incrementing the sequence number by
        # one. If there was no match or the chain ID/sequence number is already
        # taken we generate a new chain.
        #
        # We try to reduce the number of times that we hit the database by
        # batching up calls, to make this more efficient when persisting large
        # numbers of state events (e.g. during joins).
        #
        # We do this by:
        #   1. Calculating for each event which auth event will be used to
        #      inherit the chain ID, i.e. converting the auth chain graph to a
        #      tree that we can allocate chains on. We also keep track of which
        #      existing chain IDs have been referenced.
        #   2. Fetching the max allocated sequence number for each referenced
        #      existing chain ID, generating a map from chain ID to the max
        #      allocated sequence number.
        #   3. Iterating over the tree and allocating a chain ID/seq no. to the
        #      new event, by incrementing the sequence number from the
        #      referenced event's chain ID/seq no. and checking that the
        #      incremented sequence number hasn't already been allocated (by
        #      looking in the map generated in the previous step). We generate a
        #      new chain if the sequence number has already been allocated.
        #
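        # For example (illustrative): if a new m.room.power_levels event has an
        # auth event of the same type/state_key sitting at (chain 7, seq 3) and
        # sequence number 4 on chain 7 is free, the new event is allocated
        # (7, 4); if 4 were already taken, the event would instead start a new
        # chain at sequence number 1.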
        existing_chains: Set[int] = set()
        tree: List[Tuple[str, Optional[str]]] = []

        # We need to do this in a topologically sorted order as we want to
        # generate chain IDs/sequence numbers of an event's auth events before
        # the event itself.
        for event_id in sorted_topologically(
            events_to_calc_chain_id_for, event_to_auth_chain
        ):
            for auth_id in event_to_auth_chain.get(event_id, []):
                if event_to_types.get(event_id) == event_to_types.get(auth_id):
                    existing_chain_id = chain_map.get(auth_id)
                    if existing_chain_id:
                        existing_chains.add(existing_chain_id[0])

                    tree.append((event_id, auth_id))
                    break
            else:
                tree.append((event_id, None))

        # Fetch the current max sequence number for each existing referenced chain.
        sql = """
            SELECT chain_id, MAX(sequence_number) FROM event_auth_chains
            WHERE %s
            GROUP BY chain_id
        """
        clause, args = make_in_list_sql_clause(
            db_pool.engine, "chain_id", existing_chains
        )
        txn.execute(sql % (clause,), args)

        chain_to_max_seq_no: Dict[Any, int] = {row[0]: row[1] for row in txn}

        # Allocate the new events chain ID/sequence numbers.
        #
        # To reduce the number of calls to the database we don't allocate a
        # chain ID number in the loop, instead we use a temporary `object()` for
        # each new chain ID. Once we've done the loop we generate the necessary
        # number of new chain IDs in one call, replacing all temporary
        # objects with real allocated chain IDs.

        unallocated_chain_ids: Set[object] = set()
        new_chain_tuples: Dict[str, Tuple[Any, int]] = {}
        for event_id, auth_event_id in tree:
            # If we reference an auth_event_id we fetch the allocated chain ID,
            # either from the existing `chain_map` or the newly generated
            # `new_chain_tuples` map.
            existing_chain_id = None
            if auth_event_id:
                existing_chain_id = new_chain_tuples.get(auth_event_id)
                if not existing_chain_id:
                    existing_chain_id = chain_map[auth_event_id]

            new_chain_tuple: Optional[Tuple[Any, int]] = None
            if existing_chain_id:
                # We found a chain ID/sequence number candidate, check it's
                # not already taken.
                proposed_new_id = existing_chain_id[0]
                proposed_new_seq = existing_chain_id[1] + 1

                if chain_to_max_seq_no[proposed_new_id] < proposed_new_seq:
                    new_chain_tuple = (
                        proposed_new_id,
                        proposed_new_seq,
                    )

            # If we need to start a new chain we allocate a temporary chain ID.
            if not new_chain_tuple:
                new_chain_tuple = (object(), 1)
                unallocated_chain_ids.add(new_chain_tuple[0])

            new_chain_tuples[event_id] = new_chain_tuple
            chain_to_max_seq_no[new_chain_tuple[0]] = new_chain_tuple[1]

        # Generate new chain IDs for all unallocated chain IDs.
        newly_allocated_chain_ids = event_chain_id_gen.get_next_mult_txn(
            txn, len(unallocated_chain_ids)
        )

        # Map from potentially temporary chain ID to real chain ID
        chain_id_to_allocated_map: Dict[Any, int] = dict(
            zip(unallocated_chain_ids, newly_allocated_chain_ids)
        )
        chain_id_to_allocated_map.update((c, c) for c in existing_chains)

        return {
            event_id: (chain_id_to_allocated_map[chain_id], seq)
            for event_id, (chain_id, seq) in new_chain_tuples.items()
        }

    def _persist_transaction_ids_txn(
        self,
        txn: LoggingTransaction,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
    ) -> None:
        """Persist the mapping from transaction IDs to event IDs (if defined)."""

        to_insert = []
        for event, _ in events_and_contexts:
            token_id = getattr(event.internal_metadata, "token_id", None)
            txn_id = getattr(event.internal_metadata, "txn_id", None)
            if token_id and txn_id:
                to_insert.append(
                    (
                        event.event_id,
                        event.room_id,
                        event.sender,
                        token_id,
                        txn_id,
                        self._clock.time_msec(),
                    )
                )

        if to_insert:
            self.db_pool.simple_insert_many_txn(
                txn,
                table="event_txn_id",
                keys=(
                    "event_id",
                    "room_id",
                    "user_id",
                    "token_id",
                    "txn_id",
                    "inserted_ts",
                ),
                values=to_insert,
            )

    async def update_current_state(
        self,
        room_id: str,
        state_delta: DeltaState,
        stream_id: int,
    ) -> None:
        """Update the current state stored in the database for the given room"""

        await self.db_pool.runInteraction(
            "update_current_state",
            self._update_current_state_txn,
            state_delta_by_room={room_id: state_delta},
            stream_id=stream_id,
        )

    def _update_current_state_txn(
        self,
        txn: LoggingTransaction,
        state_delta_by_room: Dict[str, DeltaState],
        stream_id: int,
    ) -> None:
        for room_id, delta_state in state_delta_by_room.items():
            to_delete = delta_state.to_delete
            to_insert = delta_state.to_insert

            # Figure out the changes of membership to invalidate the
            # `get_rooms_for_user` cache.
            # We find out which membership events we may have deleted
            # and which we have added, then we invalidate the caches for all
            # those users.
            members_changed = {
                state_key
                for ev_type, state_key in itertools.chain(to_delete, to_insert)
                if ev_type == EventTypes.Member
            }

            if delta_state.no_longer_in_room:
                # Server is no longer in the room so we delete the room from
                # current_state_events, being careful we've already updated the
                # rooms.room_version column (which gets populated in a
                # background task).
                self._upsert_room_version_txn(txn, room_id)

                # Before deleting we populate the current_state_delta_stream
                # so that async background tasks get told what happened.
                sql = """
                    INSERT INTO current_state_delta_stream
                        (stream_id, instance_name, room_id, type, state_key, event_id, prev_event_id)
                    SELECT ?, ?, room_id, type, state_key, null, event_id
                        FROM current_state_events
                        WHERE room_id = ?
                """
                txn.execute(sql, (stream_id, self._instance_name, room_id))

                # We also want to invalidate the membership caches for users
                # that were in the room.
                users_in_room = self.store.get_users_in_room_txn(txn, room_id)
                members_changed.update(users_in_room)

                self.db_pool.simple_delete_txn(
                    txn,
                    table="current_state_events",
                    keyvalues={"room_id": room_id},
                )
            else:
                # We're still in the room, so we update the current state as normal.

                # First we add entries to the current_state_delta_stream. We
                # do this before updating the current_state_events table so
                # that we can use it to calculate the `prev_event_id`. (This
                # allows us to not have to pull out the existing state
                # unnecessarily).
                #
                # The stream_id for the update is chosen to be the minimum of the stream_ids
                # for the batch of the events that we are persisting; that means we do not
                # end up in a situation where workers see events before the
                # current_state_delta updates.
                #
                sql = """
                    INSERT INTO current_state_delta_stream
                    (stream_id, instance_name, room_id, type, state_key, event_id, prev_event_id)
                    SELECT ?, ?, ?, ?, ?, ?, (
                        SELECT event_id FROM current_state_events
                        WHERE room_id = ? AND type = ? AND state_key = ?
                    )
                """
                txn.execute_batch(
                    sql,
                    (
                        (
                            stream_id,
                            self._instance_name,
                            room_id,
                            etype,
                            state_key,
                            to_insert.get((etype, state_key)),
                            room_id,
                            etype,
                            state_key,
                        )
                        for etype, state_key in itertools.chain(to_delete, to_insert)
                    ),
                )
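
                # The nested SELECT above reads the event currently recorded in
                # current_state_events for each (type, state_key) pair, so each
                # delta row captures the old event_id as prev_event_id before
                # the DELETE/INSERT below replaces it.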
                # Now we actually update the current_state_events table

                txn.execute_batch(
                    "DELETE FROM current_state_events"
                    " WHERE room_id = ? AND type = ? AND state_key = ?",
                    (
                        (room_id, etype, state_key)
                        for etype, state_key in itertools.chain(to_delete, to_insert)
                    ),
                )

                # We include the membership in the current state table, hence we do
                # a lookup when we insert. This assumes that all events have already
                # been inserted into room_memberships.
                txn.execute_batch(
                    """INSERT INTO current_state_events
                        (room_id, type, state_key, event_id, membership)
                    VALUES (?, ?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?))
                    """,
                    [
                        (room_id, key[0], key[1], ev_id, ev_id)
                        for key, ev_id in to_insert.items()
                    ],
                )

            # We now update `local_current_membership`. We do this regardless
            # of whether we're still in the room or not to handle the case where
            # e.g. we just got banned (where we need to record that fact here).

            # Note: Do we really want to delete rows here (that we do not
            # subsequently reinsert below)? While technically correct it means
            # we have no record of the fact the user *was* a member of the
            # room but got, say, state reset out of it.
            if to_delete or to_insert:
                txn.execute_batch(
                    "DELETE FROM local_current_membership"
                    " WHERE room_id = ? AND user_id = ?",
                    (
                        (room_id, state_key)
                        for etype, state_key in itertools.chain(to_delete, to_insert)
                        if etype == EventTypes.Member and self.is_mine_id(state_key)
                    ),
                )

            if to_insert:
                txn.execute_batch(
                    """INSERT INTO local_current_membership
                        (room_id, user_id, event_id, membership)
                    VALUES (?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?))
                    """,
                    [
                        (room_id, key[1], ev_id, ev_id)
                        for key, ev_id in to_insert.items()
                        if key[0] == EventTypes.Member and self.is_mine_id(key[1])
                    ],
                )

            txn.call_after(
                self.store._curr_state_delta_stream_cache.entity_has_changed,
                room_id,
                stream_id,
            )

            # Invalidate the various caches
            for member in members_changed:
                txn.call_after(
                    self.store.get_rooms_for_user_with_stream_ordering.invalidate,
                    (member,),
                )

            self.store._invalidate_state_caches_and_stream(
                txn, room_id, members_changed
            )

    def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str) -> None:
        """Update the room version in the database based off current state
        events.

        This is used when we're about to delete current state and we want to
        ensure that the `rooms.room_version` column is up to date.
        """

        sql = """
            SELECT json FROM event_json
            INNER JOIN current_state_events USING (room_id, event_id)
            WHERE room_id = ? AND type = ? AND state_key = ?
        """
        txn.execute(sql, (room_id, EventTypes.Create, ""))
        row = txn.fetchone()
        if row:
            event_json = db_to_json(row[0])
            content = event_json.get("content", {})
            creator = content.get("creator")
            room_version_id = content.get("room_version", RoomVersions.V1.identifier)

            self.db_pool.simple_upsert_txn(
                txn,
                table="rooms",
                keyvalues={"room_id": room_id},
                values={"room_version": room_version_id},
                insertion_values={"is_public": False, "creator": creator},
            )

    def _update_forward_extremities_txn(
        self,
        txn: LoggingTransaction,
        new_forward_extremities: Dict[str, Set[str]],
        max_stream_order: int,
    ) -> None:
        for room_id in new_forward_extremities.keys():
            self.db_pool.simple_delete_txn(
                txn, table="event_forward_extremities", keyvalues={"room_id": room_id}
            )
            txn.call_after(
                self.store.get_latest_event_ids_in_room.invalidate, (room_id,)
            )

        self.db_pool.simple_insert_many_txn(
            txn,
            table="event_forward_extremities",
            keys=("event_id", "room_id"),
            values=[
                (ev_id, room_id)
                for room_id, new_extrem in new_forward_extremities.items()
                for ev_id in new_extrem
            ],
        )
        # We now insert into stream_ordering_to_exterm a mapping from room_id,
        # new stream_ordering to new forward extremities in the room.
        # This allows us to later efficiently look up the forward extremities
        # for a room before a given stream_ordering
        self.db_pool.simple_insert_many_txn(
            txn,
            table="stream_ordering_to_exterm",
            keys=("room_id", "event_id", "stream_ordering"),
            values=[
                (room_id, event_id, max_stream_order)
                for room_id, new_extrem in new_forward_extremities.items()
                for event_id in new_extrem
            ],
        )
|
|
|
|
|
2017-03-17 07:51:13 -04:00
|
|
|
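    # Illustrative sketch (not part of Synapse) of how the dict-of-sets above
    # flattens into one row per (event_id, room_id) pair for the bulk insert:
    #
    #     new_forward_extremities = {"!room:a": {"$e1", "$e2"}}
    #     rows = [
    #         (ev_id, room_id)
    #         for room_id, new_extrem in new_forward_extremities.items()
    #         for ev_id in new_extrem
    #     ]
    #     assert sorted(rows) == [("$e1", "!room:a"), ("$e2", "!room:a")]
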
    @classmethod
    def _filter_events_and_contexts_for_duplicates(
        cls, events_and_contexts: List[Tuple[EventBase, EventContext]]
    ) -> List[Tuple[EventBase, EventContext]]:
        """Ensure that we don't have the same event twice.

        Pick the earliest non-outlier if there is one, else the earliest one.

        Args:
            events_and_contexts (list[(EventBase, EventContext)]): the events
                to deduplicate
        Returns:
            list[(EventBase, EventContext)]: filtered list
        """
        new_events_and_contexts: OrderedDict[
            str, Tuple[EventBase, EventContext]
        ] = OrderedDict()
        for event, context in events_and_contexts:
            prev_event_context = new_events_and_contexts.get(event.event_id)
            if prev_event_context:
                if not event.internal_metadata.is_outlier():
                    if prev_event_context[0].internal_metadata.is_outlier():
                        # To ensure correct ordering we pop, as OrderedDict is
                        # ordered by first insertion.
                        new_events_and_contexts.pop(event.event_id, None)
                        new_events_and_contexts[event.event_id] = (event, context)
            else:
                new_events_and_contexts[event.event_id] = (event, context)
        return list(new_events_and_contexts.values())

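    # A standalone sketch (not part of Synapse) of the dedup rule above, using
    # (event_id, is_outlier) pairs in place of real events: keep the first
    # non-outlier copy if one exists, otherwise the first copy seen.
    #
    #     from collections import OrderedDict
    #
    #     def dedup(pairs):
    #         kept = OrderedDict()
    #         for ev_id, outlier in pairs:
    #             prev = kept.get(ev_id)
    #             if prev is not None:
    #                 if not outlier and prev[1]:
    #                     # Pop and re-insert so ordering follows this copy.
    #                     kept.pop(ev_id, None)
    #                     kept[ev_id] = (ev_id, outlier)
    #             else:
    #                 kept[ev_id] = (ev_id, outlier)
    #         return list(kept.values())
    #
    #     assert dedup([("$a", True), ("$a", False)]) == [("$a", False)]
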
    def _update_room_depths_txn(
        self,
        txn: LoggingTransaction,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
    ) -> None:
        """Update min_depth for each room

        Args:
            txn (twisted.enterprise.adbapi.Connection): db connection
            events_and_contexts (list[(EventBase, EventContext)]): events
                we are persisting
        """
        depth_updates: Dict[str, int] = {}
        for event, context in events_and_contexts:
            # Remove any existing cache entries for the event_ids
            txn.call_after(self.store._invalidate_get_event_cache, event.event_id)
            # Then update the `stream_ordering` position to mark the latest
            # event as the front of the room. This should not be done for
            # backfilled events because backfilled events have negative
            # stream_ordering and happened in the past, so we know that we
            # don't need to update the stream_ordering tip/front for the room.
            assert event.internal_metadata.stream_ordering is not None
            if event.internal_metadata.stream_ordering >= 0:
                txn.call_after(
                    self.store._events_stream_cache.entity_has_changed,
                    event.room_id,
                    event.internal_metadata.stream_ordering,
                )

            if not event.internal_metadata.is_outlier() and not context.rejected:
                depth_updates[event.room_id] = max(
                    event.depth, depth_updates.get(event.room_id, event.depth)
                )

        for room_id, depth in depth_updates.items():
            self._update_min_depth_for_room_txn(txn, room_id, depth)

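    # Illustrative sketch (not part of Synapse): the depth bookkeeping above is
    # a per-room running maximum over the accepted, non-outlier events.
    #
    #     depth_updates = {}
    #     for room_id, depth in [("!r:a", 5), ("!r:a", 3), ("!r:b", 7)]:
    #         depth_updates[room_id] = max(depth, depth_updates.get(room_id, depth))
    #     assert depth_updates == {"!r:a": 5, "!r:b": 7}
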
    def _update_outliers_txn(
        self,
        txn: LoggingTransaction,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
    ) -> List[Tuple[EventBase, EventContext]]:
        """Update any outliers with new event info.

        This turns outliers into ex-outliers (unless the new event was rejected), and
        also removes any other events we have already seen from the list.

        Args:
            txn: db connection
            events_and_contexts: events we are persisting

        Returns:
            new list, without events which are already in the events table.
        """
        txn.execute(
            "SELECT event_id, outlier FROM events WHERE event_id in (%s)"
            % (",".join(["?"] * len(events_and_contexts)),),
            [event.event_id for event, _ in events_and_contexts],
        )

        have_persisted: Dict[str, bool] = {
            event_id: outlier for event_id, outlier in txn
        }

        to_remove = set()
        for event, context in events_and_contexts:
            if event.event_id not in have_persisted:
                continue

            to_remove.add(event)

            if context.rejected:
                # If the incoming event is rejected then we don't care if the event
                # was an outlier or not - what we have is at least as good.
                continue

            outlier_persisted = have_persisted[event.event_id]
            if not event.internal_metadata.is_outlier() and outlier_persisted:
                # We received a copy of an event that we had already stored as
                # an outlier in the database. We now have some state at that event
                # so we need to update the state_groups table with that state.
                #
                # Note that we do not update the stream_ordering of the event in this
                # scenario. XXX: does this cause bugs? It will mean we won't send such
                # events down /sync. In general they will be historical events, so that
                # doesn't matter too much, but that is not always the case.

                logger.info("Updating state for ex-outlier event %s", event.event_id)

                # insert into event_to_state_groups.
                try:
                    self._store_event_state_mappings_txn(txn, ((event, context),))
                except Exception:
                    logger.exception(
                        "Failed to store state mappings for ex-outlier event %s",
                        event.event_id,
                    )
                    raise

                # Add an entry to the ex_outlier_stream table to replicate the
                # change in outlier status to our workers.
                stream_order = event.internal_metadata.stream_ordering
                state_group_id = context.state_group
                self.db_pool.simple_insert_txn(
                    txn,
                    table="ex_outlier_stream",
                    values={
                        "event_stream_ordering": stream_order,
                        "event_id": event.event_id,
                        "state_group": state_group_id,
                        "instance_name": self._instance_name,
                    },
                )

                sql = "UPDATE events SET outlier = ? WHERE event_id = ?"
                txn.execute(sql, (False, event.event_id))

                # Update the event_backward_extremities table now that this
                # event isn't an outlier any more.
                self._update_backward_extremeties(txn, [event])

        return [ec for ec in events_and_contexts if ec[0] not in to_remove]

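    # A minimal sketch (not part of Synapse) of the placeholder expansion used
    # in the SELECT above: one "?" per event ID, joined with commas.
    #
    #     event_ids = ["$a", "$b", "$c"]
    #     sql = "SELECT event_id, outlier FROM events WHERE event_id in (%s)" % (
    #         ",".join(["?"] * len(event_ids)),
    #     )
    #     assert sql.endswith("in (?,?,?)")
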
    def _store_event_txn(
        self,
        txn: LoggingTransaction,
        events_and_contexts: Collection[Tuple[EventBase, EventContext]],
    ) -> None:
        """Insert new events into the events, event_json, redactions and
        state_events tables.
        """

        if not events_and_contexts:
            # nothing to do here
            return

        def event_dict(event: EventBase) -> JsonDict:
            d = event.get_dict()
            d.pop("redacted", None)
            d.pop("redacted_because", None)
            return d

        self.db_pool.simple_insert_many_txn(
            txn,
            table="event_json",
            keys=("event_id", "room_id", "internal_metadata", "json", "format_version"),
            values=(
                (
                    event.event_id,
                    event.room_id,
                    json_encoder.encode(event.internal_metadata.get_dict()),
                    json_encoder.encode(event_dict(event)),
                    event.format_version,
                )
                for event, _ in events_and_contexts
            ),
        )

        self.db_pool.simple_insert_many_txn(
            txn,
            table="events",
            keys=(
                "instance_name",
                "stream_ordering",
                "topological_ordering",
                "depth",
                "event_id",
                "room_id",
                "type",
                "processed",
                "outlier",
                "origin_server_ts",
                "received_ts",
                "sender",
                "contains_url",
                "state_key",
                "rejection_reason",
            ),
            values=(
                (
                    self._instance_name,
                    event.internal_metadata.stream_ordering,
                    event.depth,  # topological_ordering
                    event.depth,  # depth
                    event.event_id,
                    event.room_id,
                    event.type,
                    True,  # processed
                    event.internal_metadata.is_outlier(),
                    int(event.origin_server_ts),
                    self._clock.time_msec(),
                    event.sender,
                    "url" in event.content and isinstance(event.content["url"], str),
                    event.get_state_key(),
                    context.rejected or None,
                )
                for event, context in events_and_contexts
            ),
        )

        # If we're persisting an unredacted event we go and ensure
        # that we mark any redactions that reference this event as
        # requiring censoring.
        unredacted_events = [
            event.event_id
            for event, _ in events_and_contexts
            if not event.internal_metadata.is_redacted()
        ]
        sql = "UPDATE redactions SET have_censored = ? WHERE "
        clause, args = make_in_list_sql_clause(
            self.database_engine,
            "redacts",
            unredacted_events,
        )
        txn.execute(sql + clause, [False] + args)

        self.db_pool.simple_insert_many_txn(
            txn,
            table="state_events",
            keys=("event_id", "room_id", "type", "state_key"),
            values=(
                (event.event_id, event.room_id, event.type, event.state_key)
                for event, _ in events_and_contexts
                if event.is_state()
            ),
        )

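    # Illustrative sketch (not part of Synapse) of the `contains_url` predicate
    # used above: true only when the content has a "url" key holding a string.
    #
    #     def contains_url(content: dict) -> bool:
    #         return "url" in content and isinstance(content["url"], str)
    #
    #     assert contains_url({"url": "mxc://x/y"}) is True
    #     assert contains_url({"url": 42}) is False
    #     assert contains_url({}) is False
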
    def _store_rejected_events_txn(
        self,
        txn: LoggingTransaction,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
    ) -> List[Tuple[EventBase, EventContext]]:
        """Add rows to the 'rejections' table for received events which were
        rejected

        Args:
            txn: db connection
            events_and_contexts: events we are persisting

        Returns:
            new list, without the rejected events.
        """
        # Remove the rejected events from the list now that we've added them
        # to the events table and the events_json table.
        to_remove = set()
        for event, context in events_and_contexts:
            if context.rejected:
                # Insert the event_id into the rejections table
                # (events.rejection_reason has already been done)
                self._store_rejections_txn(txn, event.event_id, context.rejected)
                to_remove.add(event)

        return [ec for ec in events_and_contexts if ec[0] not in to_remove]

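    # Sketch (not part of Synapse) of the filtering idiom shared by this method
    # and _update_outliers_txn: drop pairs whose first element was marked.
    #
    #     pairs = [("a", 1), ("b", 2)]
    #     to_remove = {"a"}
    #     assert [p for p in pairs if p[0] not in to_remove] == [("b", 2)]
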
    def _update_metadata_tables_txn(
        self,
        txn: LoggingTransaction,
        *,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
        all_events_and_contexts: List[Tuple[EventBase, EventContext]],
        inhibit_local_membership_updates: bool = False,
    ) -> None:
        """Update all the miscellaneous tables for new events

        Args:
            txn (twisted.enterprise.adbapi.Connection): db connection
            events_and_contexts (list[(EventBase, EventContext)]): events
                we are persisting
            all_events_and_contexts (list[(EventBase, EventContext)]): all
                events that we were going to persist. This includes events
                we've already persisted, etc, that wouldn't appear in
                events_and_contexts.
            inhibit_local_membership_updates: Stop the local_current_membership
                from being updated by these events. This should be set to True
                for backfilled events because backfilled events in the past do
                not affect the current local state.
        """

        # Insert all the push actions into the event_push_actions table.
        self._set_push_actions_for_event_and_users_txn(
            txn,
            events_and_contexts=events_and_contexts,
            all_events_and_contexts=all_events_and_contexts,
        )

        if not events_and_contexts:
            # nothing to do here
            return

        for event, _ in events_and_contexts:
            if event.type == EventTypes.Redaction and event.redacts is not None:
                # Remove the entries in the event_push_actions table for the
                # redacted event.
                self._remove_push_actions_for_event_id_txn(
                    txn, event.room_id, event.redacts
                )

                # Remove from relations table.
                self._handle_redact_relations(txn, event.redacts)

        # Update the event_forward_extremities, event_backward_extremities and
        # event_edges tables.
        self._handle_mult_prev_events(
            txn, events=[event for event, _ in events_and_contexts]
        )

        for event, _ in events_and_contexts:
            if event.type == EventTypes.Name:
                # Insert into the event_search table.
                self._store_room_name_txn(txn, event)
            elif event.type == EventTypes.Topic:
                # Insert into the event_search table.
                self._store_room_topic_txn(txn, event)
            elif event.type == EventTypes.Message:
                # Insert into the event_search table.
                self._store_room_message_txn(txn, event)
            elif event.type == EventTypes.Redaction and event.redacts is not None:
                # Insert into the redactions table.
                self._store_redaction(txn, event)
            elif event.type == EventTypes.Retention:
                # Update the room_retention table.
                self._store_retention_policy_for_room_txn(txn, event)

            self._handle_event_relations(txn, event)

            self._handle_insertion_event(txn, event)
            self._handle_batch_event(txn, event)

            # Store the labels for this event.
            labels = event.content.get(EventContentFields.LABELS)
            if labels:
                self.insert_labels_for_event_txn(
                    txn, event.event_id, labels, event.room_id, event.depth
                )

            if self._ephemeral_messages_enabled:
                # If there's an expiry timestamp on the event, store it.
                expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER)
                if isinstance(expiry_ts, int) and not event.is_state():
                    self._insert_event_expiry_txn(txn, event.event_id, expiry_ts)

        # Insert into the room_memberships table.
        self._store_room_members_txn(
            txn,
            [
                event
                for event, _ in events_and_contexts
                if event.type == EventTypes.Member
            ],
            inhibit_local_membership_updates=inhibit_local_membership_updates,
        )

        # Prefill the event cache
        self._add_to_cache(txn, events_and_contexts)

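    # Rough summary (not part of Synapse) of the type dispatch in the loop
    # above, spelled out with the literal event types behind the EventTypes
    # constants: each matching event type feeds one auxiliary table.
    #
    #     dispatch = {
    #         "m.room.name": "event_search",
    #         "m.room.topic": "event_search",
    #         "m.room.message": "event_search",
    #         "m.room.redaction": "redactions",
    #         "m.room.retention": "room_retention",
    #     }
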
    def _add_to_cache(
        self,
        txn: LoggingTransaction,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
    ) -> None:
        to_prefill = []

        ev_map = {e.event_id: e for e, _ in events_and_contexts}
        if not ev_map:
            return

        sql = (
            "SELECT "
            " e.event_id as event_id, "
            " r.redacts as redacts,"
            " rej.event_id as rejects "
            " FROM events as e"
            " LEFT JOIN rejections as rej USING (event_id)"
            " LEFT JOIN redactions as r ON e.event_id = r.redacts"
            " WHERE "
        )

        clause, args = make_in_list_sql_clause(
            self.database_engine, "e.event_id", list(ev_map)
        )

        txn.execute(sql + clause, args)
        rows = self.db_pool.cursor_to_dict(txn)
        for row in rows:
            event = ev_map[row["event_id"]]
            if not row["rejects"] and not row["redacts"]:
                to_prefill.append(EventCacheEntry(event=event, redacted_event=None))

        def prefill() -> None:
            for cache_entry in to_prefill:
                self.store._get_event_cache.set(
                    (cache_entry.event.event_id,), cache_entry
                )

        txn.call_after(prefill)

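    # A minimal sketch (not part of Synapse) of the prefill rule above: a row
    # is cacheable only if the LEFT JOINs found neither a rejection nor a
    # redaction, i.e. both extra columns came back NULL.
    #
    #     rows = [
    #         {"event_id": "$a", "redacts": None, "rejects": None},
    #         {"event_id": "$b", "redacts": "$b", "rejects": None},
    #     ]
    #     cacheable = [
    #         r["event_id"] for r in rows if not r["rejects"] and not r["redacts"]
    #     ]
    #     assert cacheable == ["$a"]
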
    def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None:
        """Invalidate the caches for the redacted event.

        Note that these caches are also cleared as part of event replication in
        _invalidate_caches_for_event.
        """
        assert event.redacts is not None
        txn.call_after(self.store._invalidate_get_event_cache, event.redacts)
        txn.call_after(self.store.get_relations_for_event.invalidate, (event.redacts,))
        txn.call_after(self.store.get_applicable_edit.invalidate, (event.redacts,))

        self.db_pool.simple_upsert_txn(
            txn,
            table="redactions",
            keyvalues={"event_id": event.event_id},
            values={
                "redacts": event.redacts,
                "received_ts": self._clock.time_msec(),
            },
        )

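    # Hedged note (assumption, not confirmed by this file): `simple_upsert_txn`
    # behaves like insert-or-update keyed on `keyvalues`, so re-persisting the
    # same redaction event refreshes `received_ts` rather than adding a second
    # row. A rough SQL analogue for the call above, assuming a unique index on
    # redactions(event_id):
    #
    #     INSERT INTO redactions (event_id, redacts, received_ts)
    #     VALUES (?, ?, ?)
    #     ON CONFLICT (event_id) DO UPDATE
    #         SET redacts = EXCLUDED.redacts, received_ts = EXCLUDED.received_ts
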
    def insert_labels_for_event_txn(
        self,
        txn: LoggingTransaction,
        event_id: str,
        labels: List[str],
        room_id: str,
        topological_ordering: int,
    ) -> None:
        """Store the mapping between an event's ID and its labels, with one row per
        (event_id, label) tuple.

        Args:
            txn: The transaction to execute.
            event_id: The event's ID.
            labels: A list of text labels.
            room_id: The ID of the room the event was sent to.
            topological_ordering: The position of the event in the room's topology.
        """
        self.db_pool.simple_insert_many_txn(
            txn=txn,
            table="event_labels",
            keys=("event_id", "label", "room_id", "topological_ordering"),
            values=[
                (event_id, label, room_id, topological_ordering) for label in labels
            ],
        )

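    # Illustrative sketch (not part of Synapse): the insert above produces one
    # row per label, all sharing the same event metadata.
    #
    #     labels = ["work", "fun"]
    #     rows = [("$ev", label, "!r:a", 42) for label in labels]
    #     assert rows == [("$ev", "work", "!r:a", 42), ("$ev", "fun", "!r:a", 42)]
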
    def _insert_event_expiry_txn(
        self, txn: LoggingTransaction, event_id: str, expiry_ts: int
    ) -> None:
        """Save the expiry timestamp associated with a given event ID.

        Args:
            txn: The database transaction to use.
            event_id: The event ID the expiry timestamp is associated with.
            expiry_ts: The timestamp at which to expire (delete) the event.
        """
        self.db_pool.simple_insert_txn(
            txn=txn,
            table="event_expiry",
            values={"event_id": event_id, "expiry_ts": expiry_ts},
        )

    def _store_room_members_txn(
        self,
        txn: LoggingTransaction,
        events: List[EventBase],
        *,
        inhibit_local_membership_updates: bool = False,
    ) -> None:
        """
        Store room membership events in the database.

        Args:
            txn: The transaction to use.
            events: List of events to store.
            inhibit_local_membership_updates: Stop the local_current_membership
                from being updated by these events. This should be set to True
                for backfilled events because backfilled events in the past do
                not affect the current local state.
        """

        self.db_pool.simple_insert_many_txn(
            txn,
            table="room_memberships",
            keys=(
                "event_id",
                "user_id",
                "sender",
                "room_id",
                "membership",
                "display_name",
                "avatar_url",
            ),
            values=[
                (
                    event.event_id,
                    event.state_key,
                    event.user_id,
                    event.room_id,
                    event.membership,
                    non_null_str_or_none(event.content.get("displayname")),
                    non_null_str_or_none(event.content.get("avatar_url")),
                )
                for event in events
            ],
        )

        for event in events:
            assert event.internal_metadata.stream_ordering is not None
            txn.call_after(
                self.store._membership_stream_cache.entity_has_changed,
                event.state_key,
                event.internal_metadata.stream_ordering,
            )
            txn.call_after(
                self.store.get_invited_rooms_for_local_user.invalidate,
                (event.state_key,),
            )

            # The `_get_membership_from_event_id` cache is immutable, except for
            # the case where we look up an event *before* persisting it.
            txn.call_after(
                self.store._get_membership_from_event_id.invalidate,
                (event.event_id,),
            )

            # We update the local_current_membership table only if the event is
            # "current", i.e., it's something that has just happened.
            #
            # This will usually get updated by the `current_state_events` handling,
            # unless it's an outlier, and an outlier is only "current" if it's an
            # "out of band membership", like a remote invite or a rejection of a
            # remote invite.
            if (
                self.is_mine_id(event.state_key)
                and not inhibit_local_membership_updates
                and event.internal_metadata.is_outlier()
                and event.internal_metadata.is_out_of_band_membership()
            ):
                self.db_pool.simple_upsert_txn(
                    txn,
                    table="local_current_membership",
                    keyvalues={"room_id": event.room_id, "user_id": event.state_key},
                    values={
                        "event_id": event.event_id,
                        "membership": event.membership,
                    },
                )

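    # A minimal sketch (not part of Synapse) of the "out of band membership"
    # guard above: all four conditions must hold before local_current_membership
    # is touched here.
    #
    #     def should_update(is_mine, inhibited, outlier, out_of_band):
    #         return is_mine and not inhibited and outlier and out_of_band
    #
    #     assert should_update(True, False, True, True) is True
    #     assert should_update(True, True, True, True) is False   # backfill
    #     assert should_update(True, False, False, True) is False  # handled via
    #     # the current_state_events path instead.
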
    def _handle_event_relations(
        self, txn: LoggingTransaction, event: EventBase
    ) -> None:
        """Handles inserting relation data during persistence of events

        Args:
            txn: The current database transaction.
            event: The event which might have relations.
        """
        relation = relation_from_event(event)
        if not relation:
            # No relation, nothing to do.
            return

        self.db_pool.simple_insert_txn(
            txn,
            table="event_relations",
            values={
                "event_id": event.event_id,
                "relates_to_id": relation.parent_id,
                "relation_type": relation.rel_type,
                "aggregation_key": relation.aggregation_key,
            },
        )

        txn.call_after(
            self.store.get_relations_for_event.invalidate, (relation.parent_id,)
        )
        txn.call_after(
            self.store.get_aggregation_groups_for_event.invalidate,
            (relation.parent_id,),
        )

        if relation.rel_type == RelationTypes.REPLACE:
            txn.call_after(
                self.store.get_applicable_edit.invalidate, (relation.parent_id,)
            )

        if relation.rel_type == RelationTypes.THREAD:
            txn.call_after(
                self.store.get_thread_summary.invalidate, (relation.parent_id,)
            )
            # It should be safe to only invalidate the cache if the user has not
            # previously participated in the thread, but that's difficult (and
            # potentially error-prone) so it is always invalidated.
            txn.call_after(
                self.store.get_thread_participated.invalidate,
                (relation.parent_id, event.sender),
            )

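    # Hedged sketch (assumption based on the Matrix relations format, not
    # confirmed by this file): `relation_from_event` reads the "m.relates_to"
    # block of the event content, roughly of this shape:
    #
    #     content = {
    #         "m.relates_to": {
    #             "rel_type": "m.replace",  # -> relation.rel_type
    #             "event_id": "$parent",    # -> relation.parent_id
    #             "key": "+1",              # -> relation.aggregation_key
    #         }
    #     }
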
    def _handle_insertion_event(
        self, txn: LoggingTransaction, event: EventBase
    ) -> None:
        """Handles keeping track of insertion events and edges/connections.
        Part of MSC2716.

        Args:
            txn: The database transaction object
            event: The event to process
        """

        if event.type != EventTypes.MSC2716_INSERTION:
            # Not an insertion event
            return

        # Skip processing an insertion event if the room version doesn't
        # support it or the event is not from the room creator.
        room_version = self.store.get_room_version_txn(txn, event.room_id)
        room_creator = self.db_pool.simple_select_one_onecol_txn(
            txn,
            table="rooms",
            keyvalues={"room_id": event.room_id},
            retcol="creator",
            allow_none=True,
        )
        if not room_version.msc2716_historical and (
            not self.hs.config.experimental.msc2716_enabled
            or event.sender != room_creator
        ):
            return

        next_batch_id = event.content.get(EventContentFields.MSC2716_NEXT_BATCH_ID)
        if next_batch_id is None:
            # Invalid insertion event without next batch ID
            return

        logger.debug(
            "_handle_insertion_event (next_batch_id=%s) %s", next_batch_id, event
        )

        # Keep track of the insertion event and the batch ID
        self.db_pool.simple_insert_txn(
            txn,
            table="insertion_events",
            values={
                "event_id": event.event_id,
                "room_id": event.room_id,
                "next_batch_id": next_batch_id,
            },
        )

        # Insert an edge for every prev_event connection
        for prev_event_id in event.prev_event_ids():
            self.db_pool.simple_insert_txn(
                txn,
                table="insertion_event_edges",
                values={
                    "event_id": event.event_id,
                    "room_id": event.room_id,
                    "insertion_prev_event_id": prev_event_id,
                },
            )

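    # Note (sketch, not from this file): conceptually the rows written above
    # form a small graph. For an insertion event "$ins" in room "!room:a" with
    # next_batch_id "b1" and prev_events ["$p1", "$p2"], we would record:
    #
    #     insertion_events:       ("$ins", "!room:a", "b1")
    #     insertion_event_edges:  ("$ins", "!room:a", "$p1")
    #                             ("$ins", "!room:a", "$p2")
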
    def _handle_batch_event(self, txn: LoggingTransaction, event: EventBase) -> None:
        """Handles inserting the batch edges/connections between the batch event
        and an insertion event. Part of MSC2716.

        Args:
            txn: The database transaction object
            event: The event to process
        """

        if event.type != EventTypes.MSC2716_BATCH:
            # Not a batch event
            return

        # Skip processing a batch event if the room version doesn't
        # support it or the event is not from the room creator.
return g.throw(self.type, self.value, self.tb)
File "/home/runner/work/synapse/synapse/synapse/storage/background_updates.py", line 224, in do_next_background_update
await self._do_background_update(desired_duration_ms)
File "/home/runner/work/synapse/synapse/synapse/storage/background_updates.py", line 261, in _do_background_update
items_updated = await update_handler(progress, batch_size)
File "/home/runner/work/synapse/synapse/synapse/storage/databases/main/room.py", line 1399, in _background_populate_rooms_creator_column
end = await self.db_pool.runInteraction(
File "/home/runner/work/synapse/synapse/synapse/storage/database.py", line 686, in runInteraction
result = await self.runWithConnection(
File "/home/runner/work/synapse/synapse/synapse/storage/database.py", line 791, in runWithConnection
return await make_deferred_yieldable(
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/internet/defer.py", line 858, in _runCallbacks
current.result = callback( # type: ignore[misc]
File "/home/runner/work/synapse/synapse/tests/server.py", line 425, in <lambda>
d.addCallback(lambda x: function(*args, **kwargs))
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/enterprise/adbapi.py", line 293, in _runWithConnection
compat.reraise(excValue, excTraceback)
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/python/deprecate.py", line 298, in deprecatedFunction
return function(*args, **kwargs)
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/python/compat.py", line 404, in reraise
raise exception.with_traceback(traceback)
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/enterprise/adbapi.py", line 284, in _runWithConnection
result = func(conn, *args, **kw)
File "/home/runner/work/synapse/synapse/synapse/storage/database.py", line 786, in inner_func
return func(db_conn, *args, **kwargs)
File "/home/runner/work/synapse/synapse/synapse/storage/database.py", line 554, in new_transaction
r = func(cursor, *args, **kwargs)
File "/home/runner/work/synapse/synapse/synapse/storage/databases/main/room.py", line 1375, in _background_populate_rooms_creator_column_txn
for room_id, event_json in txn:
psycopg2.ProgrammingError: no results to fetch
```
* Move code not under the MSC2716 room version underneath an experimental config option
See https://github.com/matrix-org/synapse/pull/10566#issuecomment-906437909
* Add ordering to rooms creator background update
See https://github.com/matrix-org/synapse/pull/10697#discussion_r696815277
* Add comment to better document constant
See https://github.com/matrix-org/synapse/pull/10697#discussion_r699674458
* Use constant field
2021-09-04 01:58:49 -04:00
|
|
|
        # support it or the event is not from the room creator.
        room_version = self.store.get_room_version_txn(txn, event.room_id)
        room_creator = self.db_pool.simple_select_one_onecol_txn(
            txn,
            table="rooms",
            keyvalues={"room_id": event.room_id},
            retcol="creator",
            allow_none=True,
        )
        if not room_version.msc2716_historical and (
            not self.hs.config.experimental.msc2716_enabled
            or event.sender != room_creator
        ):
            return

        batch_id = event.content.get(EventContentFields.MSC2716_BATCH_ID)
        if batch_id is None:
            # Invalid batch event without a batch ID
            return

        logger.debug("_handle_batch_event batch_id=%s %s", batch_id, event)

        # Keep track of the insertion event and the batch ID
        self.db_pool.simple_insert_txn(
            txn,
            table="batch_events",
            values={
                "event_id": event.event_id,
                "room_id": event.room_id,
                "batch_id": batch_id,
            },
        )

        # When we receive an event with a `batch_id` referencing the
        # `next_batch_id` of the insertion event, we can remove it from the
        # `insertion_event_extremities` table.
        sql = """
            DELETE FROM insertion_event_extremities WHERE event_id IN (
                SELECT event_id FROM insertion_events
                WHERE next_batch_id = ?
            )
        """
        txn.execute(sql, (batch_id,))
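
        # Note: this uses raw SQL rather than one of the simple_*_txn helpers
        # because the delete is keyed off a subquery (matching insertion
        # events by `next_batch_id`), which the helpers don't express.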

    def _handle_redact_relations(
        self, txn: LoggingTransaction, redacted_event_id: str
    ) -> None:
        """Handles receiving a redaction and checking whether the redacted event
        has any relations which must be removed from the database.

        Args:
            txn: The database transaction.
            redacted_event_id: The event that was redacted.
        """

        # Fetch the current relation of the event being redacted.
        redacted_relates_to = self.db_pool.simple_select_one_onecol_txn(
            txn,
            table="event_relations",
            keyvalues={"event_id": redacted_event_id},
            retcol="relates_to_id",
            allow_none=True,
        )
        # Any relation information for the related event must be cleared.
        if redacted_relates_to is not None:
            self.store._invalidate_cache_and_stream(
                txn, self.store.get_relations_for_event, (redacted_relates_to,)
            )
            self.store._invalidate_cache_and_stream(
                txn, self.store.get_aggregation_groups_for_event, (redacted_relates_to,)
            )
            self.store._invalidate_cache_and_stream(
                txn, self.store.get_applicable_edit, (redacted_relates_to,)
            )
            self.store._invalidate_cache_and_stream(
                txn, self.store.get_thread_summary, (redacted_relates_to,)
            )
            self.store._invalidate_cache_and_stream(
                txn, self.store.get_thread_participated, (redacted_relates_to,)
            )

        self.db_pool.simple_delete_txn(
            txn, table="event_relations", keyvalues={"event_id": redacted_event_id}
        )
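
    # The three helpers below feed room content into the search table; each
    # only indexes its field when the value is actually a string, so
    # malformed content is silently skipped.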
    def _store_room_topic_txn(self, txn: LoggingTransaction, event: EventBase) -> None:
        if isinstance(event.content.get("topic"), str):
            self.store_event_search_txn(
                txn, event, "content.topic", event.content["topic"]
            )

    def _store_room_name_txn(self, txn: LoggingTransaction, event: EventBase) -> None:
        if isinstance(event.content.get("name"), str):
            self.store_event_search_txn(
                txn, event, "content.name", event.content["name"]
            )

    def _store_room_message_txn(
        self, txn: LoggingTransaction, event: EventBase
    ) -> None:
        if isinstance(event.content.get("body"), str):
            self.store_event_search_txn(
                txn, event, "content.body", event.content["body"]
            )

    def _store_retention_policy_for_room_txn(
        self, txn: LoggingTransaction, event: EventBase
    ) -> None:
        if not event.is_state():
            logger.debug("Ignoring non-state m.room.retention event")
            return

        if hasattr(event, "content") and (
            "min_lifetime" in event.content or "max_lifetime" in event.content
        ):
            if (
                "min_lifetime" in event.content
                and not isinstance(event.content.get("min_lifetime"), int)
            ) or (
                "max_lifetime" in event.content
                and not isinstance(event.content.get("max_lifetime"), int)
            ):
                # Ignore the event if one of the values isn't an integer.
                return
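
            # For illustration (assumed shape, not taken from this file): a
            # retention event's content is expected to look roughly like
            #   {"min_lifetime": 86400000, "max_lifetime": 31536000000}
            # with the lifetimes assumed to be expressed in milliseconds.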

            self.db_pool.simple_insert_txn(
                txn=txn,
                table="room_retention",
                values={
                    "room_id": event.room_id,
                    "event_id": event.event_id,
                    "min_lifetime": event.content.get("min_lifetime"),
                    "max_lifetime": event.content.get("max_lifetime"),
                },
            )

            self.store._invalidate_cache_and_stream(
                txn, self.store.get_retention_policy_for_room, (event.room_id,)
            )

    def store_event_search_txn(
        self, txn: LoggingTransaction, event: EventBase, key: str, value: str
    ) -> None:
        """Add event to the search table

        Args:
            txn: The database transaction.
            event: The event being added to the search table.
            key: A key describing the search value (one of "content.name",
                "content.topic", or "content.body")
            value: The value from the event's content.
        """
        self.store.store_search_entries_txn(
            txn,
            (
                SearchEntry(
                    key=key,
                    value=value,
                    event_id=event.event_id,
                    room_id=event.room_id,
                    stream_ordering=event.internal_metadata.stream_ordering,
                    origin_server_ts=event.origin_server_ts,
                ),
            ),
        )

    def _set_push_actions_for_event_and_users_txn(
        self,
        txn: LoggingTransaction,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
        all_events_and_contexts: List[Tuple[EventBase, EventContext]],
    ) -> None:
        """Handles moving push actions from the staging table to the main
        event_push_actions table for all events in `events_and_contexts`.

        Also ensures that all events in `all_events_and_contexts` are removed
        from the push action staging area.

        Args:
            txn: The database transaction.
            events_and_contexts: events we are persisting
            all_events_and_contexts: all events that we were going to persist.
                This includes events we've already persisted, etc, that wouldn't
                appear in events_and_contexts.
        """
|
2017-05-10 12:46:41 -04:00
|
|
|
|
2021-09-08 10:18:35 -04:00
|
|
|
# Only non outlier events will have push actions associated with them,
|
|
|
|
# so let's filter them out. (This makes joining large rooms faster, as
|
|
|
|
# these queries took seconds to process all the state events).
|
|
|
|
non_outlier_events = [
|
|
|
|
event
|
|
|
|
for event, _ in events_and_contexts
|
|
|
|
if not event.internal_metadata.is_outlier()
|
|
|
|
]
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
sql = """
|
|
|
|
INSERT INTO event_push_actions (
|
|
|
|
room_id, event_id, user_id, actions, stream_ordering,
|
2020-09-02 12:19:37 -04:00
|
|
|
topological_ordering, notif, highlight, unread
|
2016-07-04 11:02:50 -04:00
|
|
|
)
|
2020-09-02 12:19:37 -04:00
|
|
|
SELECT ?, event_id, user_id, actions, ?, ?, notif, highlight, unread
|
2020-05-13 08:38:22 -04:00
|
|
|
FROM event_push_actions_staging
|
|
|
|
WHERE event_id = ?
|
|
|
|
"""
|
2016-07-04 11:02:50 -04:00
|
|
|
|
2021-09-08 10:18:35 -04:00
|
|
|
if non_outlier_events:
|
2021-01-21 05:22:53 -05:00
|
|
|
txn.execute_batch(
|
2020-05-13 08:38:22 -04:00
|
|
|
sql,
|
|
|
|
(
|
|
|
|
(
|
|
|
|
event.room_id,
|
|
|
|
event.internal_metadata.stream_ordering,
|
|
|
|
event.depth,
|
|
|
|
event.event_id,
|
|
|
|
)
|
2021-09-08 10:18:35 -04:00
|
|
|
for event in non_outlier_events
|
2020-05-13 08:38:22 -04:00
|
|
|
),
|
|
|
|
)
|
2018-02-14 10:44:51 -05:00
|
|
|
|
2021-07-15 12:46:54 -04:00
|
|
|
room_to_event_ids: Dict[str, List[str]] = {}
|
2021-09-08 10:18:35 -04:00
|
|
|
for e in non_outlier_events:
|
2021-04-16 09:44:55 -04:00
|
|
|
room_to_event_ids.setdefault(e.room_id, []).append(e.event_id)
|
2018-02-14 10:44:51 -05:00
|
|
|
|
2021-04-16 09:44:55 -04:00
|
|
|
for room_id, event_ids in room_to_event_ids.items():
|
|
|
|
rows = self.db_pool.simple_select_many_txn(
|
|
|
|
txn,
|
|
|
|
table="event_push_actions_staging",
|
|
|
|
column="event_id",
|
|
|
|
iterable=event_ids,
|
|
|
|
keyvalues={},
|
|
|
|
retcols=("user_id",),
|
2020-05-13 08:38:22 -04:00
|
|
|
)
|
|
|
|
|
2021-04-16 09:44:55 -04:00
|
|
|
user_ids = {row["user_id"] for row in rows}
|
|
|
|
|
|
|
|
for user_id in user_ids:
|
|
|
|
txn.call_after(
|
2021-05-27 05:33:56 -04:00
|
|
|
self.store.get_unread_event_push_actions_by_room_for_user.invalidate,
|
2021-04-16 09:44:55 -04:00
|
|
|
(room_id, user_id),
|
|
|
|
)
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
# Now we delete the staging area for *all* events that were being
|
|
|
|
# persisted.
|
2021-01-21 05:22:53 -05:00
|
|
|
txn.execute_batch(
|
2020-05-13 08:38:22 -04:00
|
|
|
"DELETE FROM event_push_actions_staging WHERE event_id = ?",
|
2021-09-08 10:18:35 -04:00
|
|
|
(
|
|
|
|
(event.event_id,)
|
|
|
|
for event, _ in all_events_and_contexts
|
|
|
|
if not event.internal_metadata.is_outlier()
|
|
|
|
),
|
2018-02-14 06:02:22 -05:00
|
|
|
)
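
        # In short: rows land in event_push_actions_staging when push rules are
        # evaluated, the INSERT ... SELECT above copies them into
        # event_push_actions once the event is actually persisted, and the final
        # DELETE clears the staging rows for every event in the batch.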
    def _remove_push_actions_for_event_id_txn(
        self, txn: LoggingTransaction, room_id: str, event_id: str
    ) -> None:
        # Sad that we have to blow away the cache for the whole room here
        txn.call_after(
            self.store.get_unread_event_push_actions_by_room_for_user.invalidate,
            (room_id,),
        )
        txn.execute(
            "DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?",
            (room_id, event_id),
        )
    def _store_rejections_txn(
        self, txn: LoggingTransaction, event_id: str, reason: str
    ) -> None:
        self.db_pool.simple_insert_txn(
            txn,
            table="rejections",
            values={
                "event_id": event_id,
                "reason": reason,
                "last_check": self._clock.time_msec(),
            },
        )
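
        # The rejections row is keyed on event_id; `last_check` records the
        # wall-clock time (in milliseconds) at which the rejection was stored.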
    def _store_event_state_mappings_txn(
        self,
        txn: LoggingTransaction,
        events_and_contexts: Collection[Tuple[EventBase, EventContext]],
    ) -> None:
        state_groups = {}
        for event, context in events_and_contexts:
            if event.internal_metadata.is_outlier():
                # double-check that we don't have any events that claim to be outliers
                # *and* have partial state (which is meaningless: we should have no
                # state at all for an outlier)
                if context.partial_state:
                    raise ValueError(
                        "Outlier event %s claims to have partial state"
                        % (event.event_id,)
                    )

                continue

            # if the event was rejected, just give it the same state as its
            # predecessor.
            if context.rejected:
                state_groups[event.event_id] = context.state_group_before_event
                continue

            state_groups[event.event_id] = context.state_group

        # if we have partial state for these events, record the fact. (This happens
        # here rather than in _store_event_txn because it also needs to happen when
        # we de-outlier an event.)
        self.db_pool.simple_insert_many_txn(
            txn,
            table="partial_state_events",
            keys=("room_id", "event_id"),
            values=[
                (
                    event.room_id,
                    event.event_id,
                )
                for event, ctx in events_and_contexts
                if ctx.partial_state
            ],
        )

        self.db_pool.simple_upsert_many_txn(
            txn,
            table="event_to_state_groups",
            key_names=["event_id"],
            key_values=[[event_id] for event_id, _ in state_groups.items()],
            value_names=["state_group"],
            value_values=[
                [state_group_id] for _, state_group_id in state_groups.items()
            ],
        )

        for event_id, state_group_id in state_groups.items():
            txn.call_after(
                self.store._get_state_group_for_event.prefill,
                (event_id,),
                state_group_id,
            )
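
        # In summary: outliers get no event_to_state_groups mapping at all,
        # rejected events are mapped to the state group *before* the event, and
        # everything else is mapped to the state group that includes the event.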
    def _update_min_depth_for_room_txn(
        self, txn: LoggingTransaction, room_id: str, depth: int
    ) -> None:
        min_depth = self.store._get_min_depth_interaction(txn, room_id)

        if min_depth is not None and depth >= min_depth:
            return

        self.db_pool.simple_upsert_txn(
            txn,
            table="room_depth",
            keyvalues={"room_id": room_id},
            values={"min_depth": depth},
        )
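
        # Note that `min_depth` only ever decreases: the upsert above is skipped
        # whenever the incoming depth is not strictly smaller than the stored one.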
    def _handle_mult_prev_events(
        self, txn: LoggingTransaction, events: List[EventBase]
    ) -> None:
        """
        For the given events, update the event edges table and forward and
        backward extremities tables.
        """
        self.db_pool.simple_insert_many_txn(
            txn,
            table="event_edges",
            keys=("event_id", "prev_event_id", "room_id", "is_state"),
            values=[
                (ev.event_id, e_id, ev.room_id, False)
                for ev in events
                for e_id in ev.prev_event_ids()
            ],
        )

        self._update_backward_extremeties(txn, events)
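
        # Sketch of the event_edges rows written above: one row per
        # (event, prev_event) pair, so an event E with prev_events [A, B] yields
        # (E, A, room_id, False) and (E, B, room_id, False).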
    def _update_backward_extremeties(
        self, txn: LoggingTransaction, events: List[EventBase]
    ) -> None:
        """Updates the event_backward_extremities tables based on the new/updated
        events being persisted.

        This is called for new events *and* for events that were outliers, but
        are now being persisted as non-outliers.

        Forward extremities are handled when we first start persisting the events.
        """
        # From the events passed in, add all of the prev events as backwards extremities.
        # Ignore any events that are already backwards extremities or outliers.
        query = (
            "INSERT INTO event_backward_extremities (event_id, room_id)"
            " SELECT ?, ? WHERE NOT EXISTS ("
" SELECT 1 FROM event_backward_extremities"
|
|
|
|
" WHERE event_id = ? AND room_id = ?"
|
2020-05-13 08:38:22 -04:00
|
|
|
" )"
            # 1. Don't add an event as an extremity again if we already persisted it
            # as a non-outlier.
            # 2. Don't add an outlier as an extremity if it has no prev_events
            " AND NOT EXISTS ("
" SELECT 1 FROM events"
|
|
|
|
" LEFT JOIN event_edges edge"
|
|
|
|
" ON edge.event_id = events.event_id"
|
|
|
|
" WHERE events.event_id = ? AND events.room_id = ? AND (events.outlier = ? OR edge.event_id IS NULL)"
|
2020-05-13 08:38:22 -04:00
|
|
|
" )"
|
2019-12-03 14:19:45 -05:00
|
|
|
)
|
|
|
|
|
2021-01-21 05:22:53 -05:00
|
|
|
txn.execute_batch(
|
2020-05-13 08:38:22 -04:00
|
|
|
query,
|
|
|
|
[
|
|
|
|
(e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id, False)
|
|
|
|
for ev in events
|
|
|
|
for e_id in ev.prev_event_ids()
|
|
|
|
if not ev.internal_metadata.is_outlier()
|
|
|
|
],
|
2019-12-03 14:19:45 -05:00
|
|
|
)
        # Delete all these events that we've already fetched and now know that their
        # prev events are the new backwards extremities.
        query = (
            "DELETE FROM event_backward_extremities"
            " WHERE event_id = ? AND room_id = ?"
        )
        txn.execute_batch(
            query,
            [
                (ev.event_id, ev.room_id)
                for ev in events
                if not ev.internal_metadata.is_outlier()
                # If we encountered an event with no prev_events, then we might
                # as well remove it now because it won't ever have anything else
                # to backfill from.
                or len(ev.prev_event_ids()) == 0
            ],
        )
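
        # Net effect of the two batches above: prev_events we have not yet
        # persisted become backward extremities (candidates to backfill from),
        # while events just persisted as non-outliers stop being backward
        # extremities themselves.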


@attr.s(slots=True, auto_attribs=True)
class _LinkMap:
    """A helper type for tracking links between chains."""

    # Stores the set of links as nested maps: source chain ID -> target chain ID
    # -> source sequence number -> target sequence number.
    maps: Dict[int, Dict[int, Dict[int, int]]] = attr.Factory(dict)

    # Stores the links that have been added (with `new` set to true), as tuples of
    # `(source chain ID, source sequence no, target chain ID, target sequence no.)`
    additions: Set[Tuple[int, int, int, int]] = attr.Factory(set)
    def add_link(
        self,
        src_tuple: Tuple[int, int],
        target_tuple: Tuple[int, int],
        new: bool = True,
    ) -> bool:
        """Add a new link between two chains, ensuring no redundant links are added.

        New links should be added in topological order.

        Args:
            src_tuple: The chain ID/sequence number of the source of the link.
            target_tuple: The chain ID/sequence number of the target of the link.
            new: Whether this is a "new" link, i.e. should it be returned
                by `get_additions`.

        Returns:
            True if a link was added, False if the given link was dropped as redundant
        """
        src_chain, src_seq = src_tuple
        target_chain, target_seq = target_tuple

        current_links = self.maps.setdefault(src_chain, {}).setdefault(target_chain, {})

        assert src_chain != target_chain

        if new:
            # Check if the new link is redundant
            for current_seq_src, current_seq_target in current_links.items():
                # If a link "crosses" another link then it's redundant. For example
                # in the following link 1 (L1) is redundant, as any event reachable
                # via L1 is *also* reachable via L2.
                #
                #   Chain A     Chain B
                #      |           |
                #   L1 |------     |
                #      |      |    |
                #   L2 |----  | -->|
                #      |    |      |
                #      |    |----->|
                #      |           |
                #      |           |
                #
                # So we only need to keep links which *do not* cross, i.e. links
                # that both start and end above or below an existing link.
                #
                # Note, since we add links in topological ordering we should never
                # see `src_seq` less than `current_seq_src`.

                if current_seq_src <= src_seq and target_seq <= current_seq_target:
                    # This new link is redundant, nothing to do.
                    return False

            self.additions.add((src_chain, src_seq, target_chain, target_seq))

        current_links[src_seq] = target_seq
        return True
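
    # A minimal illustrative sketch (not part of the original code): the second
    # link below "crosses" the first, so `add_link` drops it as redundant.
    #
    #     >>> links = _LinkMap()
    #     >>> links.add_link((1, 1), (2, 1))
    #     True
    #     >>> links.add_link((1, 2), (2, 1))
    #     False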
    def get_links_from(
        self, src_tuple: Tuple[int, int]
    ) -> Generator[Tuple[int, int], None, None]:
        """Gets the chains reachable from the given chain/sequence number.

        Yields:
            The chain ID and sequence number the link points to.
        """
        src_chain, src_seq = src_tuple
        for target_id, sequence_numbers in self.maps.get(src_chain, {}).items():
            for link_src_seq, target_seq in sequence_numbers.items():
                if link_src_seq <= src_seq:
                    yield target_id, target_seq
    def get_links_between(
        self, source_chain: int, target_chain: int
    ) -> Generator[Tuple[int, int], None, None]:
        """Gets the links between two chains.

        Yields:
            The source and target sequence numbers.
        """

        yield from self.maps.get(source_chain, {}).get(target_chain, {}).items()
    def get_additions(self) -> Generator[Tuple[int, int, int, int], None, None]:
        """Gets any newly added links.

        Yields:
            The source chain ID/sequence number and target chain ID/sequence number
        """

        for src_chain, src_seq, target_chain, _ in self.additions:
            target_seq = self.maps.get(src_chain, {}).get(target_chain, {}).get(src_seq)
            if target_seq is not None:
                yield (src_chain, src_seq, target_chain, target_seq)
    def exists_path_from(
        self,
        src_tuple: Tuple[int, int],
        target_tuple: Tuple[int, int],
    ) -> bool:
        """Checks if there is a path between the source chain ID/sequence and
        target chain ID/sequence.
        """
        src_chain, src_seq = src_tuple
        target_chain, target_seq = target_tuple

        if src_chain == target_chain:
            return target_seq <= src_seq

        links = self.get_links_between(src_chain, target_chain)
        for link_start_seq, link_end_seq in links:
            if link_start_seq <= src_seq and target_seq <= link_end_seq:
                return True

        return False
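
    # Illustrative sketch (not part of the original code): with a single link
    # from (chain 1, seq 1) to (chain 2, seq 1), any position at or after seq 1
    # on chain 1 can reach seq 1 on chain 2, but nothing reaches seq 2 there.
    #
    #     >>> links = _LinkMap()
    #     >>> links.add_link((1, 1), (2, 1))
    #     True
    #     >>> links.exists_path_from((1, 2), (2, 1))
    #     True
    #     >>> links.exists_path_from((1, 1), (2, 2))
    #     False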
|