# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from collections import OrderedDict, namedtuple
from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    Generator,
    Iterable,
    List,
    Optional,
    Set,
    Tuple,
)

import attr
from prometheus_client import Counter

import synapse.metrics
from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
from synapse.api.room_versions import RoomVersions
from synapse.crypto.event_signing import compute_event_reference_hash
from synapse.events import EventBase  # noqa: F401
from synapse.events.snapshot import EventContext  # noqa: F401
from synapse.logging.utils import log_function
from synapse.storage._base import db_to_json, make_in_list_sql_clause
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.search import SearchEntry
from synapse.storage.types import Connection
from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.storage.util.sequence import SequenceGenerator
from synapse.types import StateMap, get_domain_from_id
from synapse.util import json_encoder
from synapse.util.iterutils import batch_iter, sorted_topologically

if TYPE_CHECKING:
    from synapse.server import HomeServer
    from synapse.storage.databases.main import DataStore


logger = logging.getLogger(__name__)

persist_event_counter = Counter("synapse_storage_events_persisted_events", "")
event_counter = Counter(
    "synapse_storage_events_persisted_events_sep",
    "",
    ["type", "origin_type", "origin_entity"],
)


_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))


@attr.s(slots=True)
class DeltaState:
    """Deltas to use to update the `current_state_events` table.

    Attributes:
        to_delete: List of type/state_keys to delete from current state
        to_insert: Map of state to upsert into current state
        no_longer_in_room: The server is no longer in the room, so the room
            should e.g. be removed from `current_state_events` table.
    """

    to_delete = attr.ib(type=List[Tuple[str, str]])
    to_insert = attr.ib(type=StateMap[str])
    no_longer_in_room = attr.ib(type=bool, default=False)
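
# Illustrative sketch (not part of the upstream code): a DeltaState for a room
# where one local user left and the topic changed might look roughly like
#
#     DeltaState(
#         to_delete=[("m.room.member", "@alice:example.com")],
#         to_insert={("m.room.topic", ""): "$topic_event_id"},
#     )
#
# The user ID and event ID above are made up purely for illustration.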


class PersistEventsStore:
    """Contains all the functions for writing events to the database.

    Should only be instantiated on one process (when using a worker mode setup).

    Note: This is not part of the `DataStore` mixin.
    """

    def __init__(
        self,
        hs: "HomeServer",
        db: DatabasePool,
        main_data_store: "DataStore",
        db_conn: Connection,
    ):
        self.hs = hs
        self.db_pool = db
        self.store = main_data_store
        self.database_engine = db.engine
        self._clock = hs.get_clock()
        self._instance_name = hs.get_instance_name()

        self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
        self.is_mine_id = hs.is_mine_id

        # Ideally we'd move these ID gens here, unfortunately some other ID
        # generators are chained off them so doing so is a bit of a PITA.
        self._backfill_id_gen: MultiWriterIdGenerator = self.store._backfill_id_gen
        self._stream_id_gen: MultiWriterIdGenerator = self.store._stream_id_gen

        # This should only exist on instances that are configured to write
        assert (
            hs.get_instance_name() in hs.config.worker.writers.events
        ), "Can only instantiate EventsStore on master"

    async def _persist_events_and_state_updates(
        self,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
        current_state_for_room: Dict[str, StateMap[str]],
        state_delta_for_room: Dict[str, DeltaState],
        new_forward_extremeties: Dict[str, List[str]],
        backfilled: bool = False,
    ) -> None:
        """Persist a set of events alongside updates to the current state and
        forward extremities tables.

        Args:
            events_and_contexts: the events and contexts to persist
            current_state_for_room: Map from room_id to the current state of
                the room based on forward extremities
            state_delta_for_room: Map from room_id to the delta to apply to
                room state
            new_forward_extremeties: Map from room_id to list of event IDs
                that are the new forward extremities of the room.
            backfilled: True if the events were backfilled

        Returns:
            Resolves when the events have been persisted
        """

        # We want to calculate the stream orderings as late as possible, as
        # we only notify after all events with a lesser stream ordering have
        # been persisted. I.e. if we spend 10s inside the with block then
        # that will delay all subsequent events from being notified about.
        # Hence why we do it down here rather than wrapping the entire
        # function.
        #
        # It's safe to do this after calculating the state deltas etc as we
        # only need to protect the *persistence* of the events. This is to
        # ensure that queries of the form "fetch events since X" don't
        # return events and stream positions after events that are still in
        # flight, as otherwise subsequent requests "fetch events since Y"
        # will not return those events.
        #
        # Note: Multiple instances of this function cannot be in flight at
        # the same time for the same room.
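        #
        # As an illustration (made-up numbers): if event A is assigned stream
        # ordering 5 and event B ordering 6, but B's transaction happens to
        # commit first, a "fetch events since 4" request served in between
        # could return B and advance the caller's position to 6, so A would
        # never be delivered. Keeping the stream ordering context manager
        # open until the whole batch has been persisted avoids that.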
        if backfilled:
            stream_ordering_manager = self._backfill_id_gen.get_next_mult(
                len(events_and_contexts)
            )
        else:
            stream_ordering_manager = self._stream_id_gen.get_next_mult(
                len(events_and_contexts)
            )

        async with stream_ordering_manager as stream_orderings:
            for (event, _), stream in zip(events_and_contexts, stream_orderings):
                event.internal_metadata.stream_ordering = stream

            await self.db_pool.runInteraction(
                "persist_events",
                self._persist_events_txn,
                events_and_contexts=events_and_contexts,
                backfilled=backfilled,
                state_delta_for_room=state_delta_for_room,
                new_forward_extremeties=new_forward_extremeties,
            )
            persist_event_counter.inc(len(events_and_contexts))

            if not backfilled:
                # backfilled events have negative stream orderings, so we don't
                # want to set the event_persisted_position to that.
                synapse.metrics.event_persisted_position.set(
                    events_and_contexts[-1][0].internal_metadata.stream_ordering
                )

            for event, context in events_and_contexts:
                if context.app_service:
                    origin_type = "local"
                    origin_entity = context.app_service.id
                elif self.hs.is_mine_id(event.sender):
                    origin_type = "local"
                    origin_entity = "*client*"
                else:
                    origin_type = "remote"
                    origin_entity = get_domain_from_id(event.sender)

                event_counter.labels(event.type, origin_type, origin_entity).inc()

            for room_id, new_state in current_state_for_room.items():
                self.store.get_current_state_ids.prefill((room_id,), new_state)

            for room_id, latest_event_ids in new_forward_extremeties.items():
                self.store.get_latest_event_ids_in_room.prefill(
                    (room_id,), list(latest_event_ids)
                )

    async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[str]:
        """Filter the supplied list of event_ids to get those which are prev_events of
        existing (non-outlier/rejected) events.

        Args:
            event_ids: event ids to filter

        Returns:
            Filtered event ids
        """
        results: List[str] = []

        def _get_events_which_are_prevs_txn(txn, batch):
            sql = """
            SELECT prev_event_id, internal_metadata
            FROM event_edges
                INNER JOIN events USING (event_id)
                LEFT JOIN rejections USING (event_id)
                LEFT JOIN event_json USING (event_id)
            WHERE
                NOT events.outlier
                AND rejections.event_id IS NULL
                AND
            """

            clause, args = make_in_list_sql_clause(
                self.database_engine, "prev_event_id", batch
            )

            txn.execute(sql + clause, args)
            results.extend(r[0] for r in txn if not db_to_json(r[1]).get("soft_failed"))

        for chunk in batch_iter(event_ids, 100):
            await self.db_pool.runInteraction(
                "_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk
            )

        return results

    async def _get_prevs_before_rejected(self, event_ids: Iterable[str]) -> Set[str]:
        """Get soft-failed ancestors to remove from the extremities.

        Given a set of events, find all those that have been soft-failed or
        rejected. Returns those soft failed/rejected events and their prev
        events (whether soft-failed/rejected or not), and recurses up the
        prev-event graph until it finds no more soft-failed/rejected events.

        This is used to find extremities that are ancestors of new events, but
        are separated by soft failed events.

        Args:
            event_ids: Events to find prev events for. Note that these must have
                already been persisted.

        Returns:
            The previous events.
        """

        # The set of event_ids to return. This includes all soft-failed events
        # and their prev events.
        existing_prevs = set()

        def _get_prevs_before_rejected_txn(txn, batch):
            to_recursively_check = batch

            while to_recursively_check:
                sql = """
                SELECT
                    event_id, prev_event_id, internal_metadata,
                    rejections.event_id IS NOT NULL
                FROM event_edges
                    INNER JOIN events USING (event_id)
                    LEFT JOIN rejections USING (event_id)
                    LEFT JOIN event_json USING (event_id)
                WHERE
                    NOT events.outlier
                    AND
                """

                clause, args = make_in_list_sql_clause(
                    self.database_engine, "event_id", to_recursively_check
                )

                txn.execute(sql + clause, args)
                to_recursively_check = []

                for _, prev_event_id, metadata, rejected in txn:
                    if prev_event_id in existing_prevs:
                        continue

                    soft_failed = db_to_json(metadata).get("soft_failed")
                    if soft_failed or rejected:
                        to_recursively_check.append(prev_event_id)
                        existing_prevs.add(prev_event_id)

        for chunk in batch_iter(event_ids, 100):
            await self.db_pool.runInteraction(
                "_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk
            )

        return existing_prevs

    @log_function
    def _persist_events_txn(
        self,
        txn: LoggingTransaction,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
        backfilled: bool,
        state_delta_for_room: Optional[Dict[str, DeltaState]] = None,
        new_forward_extremeties: Optional[Dict[str, List[str]]] = None,
    ):
        """Insert some number of room events into the necessary database tables.

        Rejected events are only inserted into the events table, the event_json table,
        and the rejections table. Things reading from those tables will need to check
        whether the event was rejected.

        Args:
            txn
            events_and_contexts: events to persist
            backfilled: True if the events were backfilled
            state_delta_for_room: The current-state delta for each room.
            new_forward_extremeties: The new forward extremities for each room.
                For each room, a list of the event ids which are the forward
                extremities.
        """
        state_delta_for_room = state_delta_for_room or {}
        new_forward_extremeties = new_forward_extremeties or {}

        all_events_and_contexts = events_and_contexts

        min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering
        max_stream_order = events_and_contexts[-1][0].internal_metadata.stream_ordering

        # stream orderings should have been assigned by now
        assert min_stream_order
        assert max_stream_order

        self._update_forward_extremities_txn(
            txn,
            new_forward_extremities=new_forward_extremeties,
            max_stream_order=max_stream_order,
        )

        # Ensure that we don't have the same event twice.
        events_and_contexts = self._filter_events_and_contexts_for_duplicates(
            events_and_contexts
        )

        self._update_room_depths_txn(
            txn, events_and_contexts=events_and_contexts, backfilled=backfilled
        )

        # _update_outliers_txn filters out any events which have already been
        # persisted, and returns the filtered list.
        events_and_contexts = self._update_outliers_txn(
            txn, events_and_contexts=events_and_contexts
        )

        # From this point onwards the events are only events that we haven't
        # seen before.

        self._store_event_txn(txn, events_and_contexts=events_and_contexts)

        self._persist_transaction_ids_txn(txn, events_and_contexts)

        # Insert into event_to_state_groups.
        self._store_event_state_mappings_txn(txn, events_and_contexts)

        self._persist_event_auth_chain_txn(txn, [e for e, _ in events_and_contexts])

        # _store_rejected_events_txn filters out any events which were
        # rejected, and returns the filtered list.
        events_and_contexts = self._store_rejected_events_txn(
            txn, events_and_contexts=events_and_contexts
        )

        # From this point onwards the events are only ones that weren't
        # rejected.

        self._update_metadata_tables_txn(
            txn,
            events_and_contexts=events_and_contexts,
            all_events_and_contexts=all_events_and_contexts,
            backfilled=backfilled,
        )

        # We call this last as it assumes we've inserted the events into
        # room_memberships, where applicable.
        self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)

    def _persist_event_auth_chain_txn(
        self,
        txn: LoggingTransaction,
        events: List[EventBase],
    ) -> None:

        # We only care about state events, so this is a no-op if there are no
        # state events.
        if not any(e.is_state() for e in events):
            return

        # We want to store event_auth mappings for rejected events, as they're
        # used in state res v2.
        # This is only necessary if the rejected event appears in an accepted
        # event's auth chain, but it's easier for now just to store them (and
        # it doesn't take much storage compared to storing the entire event
        # anyway).
        self.db_pool.simple_insert_many_txn(
            txn,
            table="event_auth",
            values=[
                {
                    "event_id": event.event_id,
                    "room_id": event.room_id,
                    "auth_id": auth_id,
                }
                for event in events
                for auth_id in event.auth_event_ids()
                if event.is_state()
            ],
        )

        # We now calculate chain ID/sequence numbers for any state events we're
        # persisting. We ignore out of band memberships as we're not in the room
        # and won't have their auth chain (we'll fix it up later if we join the
        # room).
        #
        # See: docs/auth_chain_difference_algorithm.md
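        #
        # Roughly speaking (the doc above is the authoritative description),
        # each state event gets a (chain_id, sequence_number) pair such that
        # successive events with the same type/state_key share a chain. For
        # example, consecutive m.room.power_levels events might be numbered
        # (3, 1), (3, 2), (3, 3), and links between chains then summarise the
        # auth graph. The numbers here are purely illustrative.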

        # We ignore legacy rooms that we aren't filling the chain cover index
        # for.
        rows = self.db_pool.simple_select_many_txn(
            txn,
            table="rooms",
            column="room_id",
            iterable={event.room_id for event in events if event.is_state()},
            keyvalues={},
            retcols=("room_id", "has_auth_chain_index"),
        )
        rooms_using_chain_index = {
            row["room_id"] for row in rows if row["has_auth_chain_index"]
        }

        state_events = {
            event.event_id: event
            for event in events
            if event.is_state() and event.room_id in rooms_using_chain_index
        }

        if not state_events:
            return

        # We need to know the type/state_key and auth events of the events we're
        # calculating chain IDs for. We don't rely on having the full Event
        # instances as we'll potentially be pulling more events from the DB and
        # we don't need the overhead of fetching/parsing the full event JSON.
        event_to_types = {
            e.event_id: (e.type, e.state_key) for e in state_events.values()
        }
        event_to_auth_chain = {
            e.event_id: e.auth_event_ids() for e in state_events.values()
        }
        event_to_room_id = {e.event_id: e.room_id for e in state_events.values()}

        self._add_chain_cover_index(
            txn,
            self.db_pool,
            self.store.event_chain_id_gen,
            event_to_room_id,
            event_to_types,
            event_to_auth_chain,
        )

    @classmethod
    def _add_chain_cover_index(
        cls,
        txn,
        db_pool: DatabasePool,
        event_chain_id_gen: SequenceGenerator,
        event_to_room_id: Dict[str, str],
        event_to_types: Dict[str, Tuple[str, str]],
        event_to_auth_chain: Dict[str, List[str]],
    ) -> None:
        """Calculate the chain cover index for the given events.

        Args:
            event_to_room_id: Event ID to the room ID of the event
            event_to_types: Event ID to type and state_key of the event
            event_to_auth_chain: Event ID to list of auth event IDs of the
                event (events with no auth events can be excluded).
        """

        # Map from event ID to chain ID/sequence number.
        chain_map: Dict[str, Tuple[int, int]] = {}

        # Set of event IDs to calculate chain ID/seq numbers for.
        events_to_calc_chain_id_for = set(event_to_room_id)

        # We check if there are any events that need to be handled in the rooms
        # we're looking at. These should just be out of band memberships, where
        # we didn't have the auth chain when we first persisted.
        rows = db_pool.simple_select_many_txn(
            txn,
            table="event_auth_chain_to_calculate",
            keyvalues={},
            column="room_id",
            iterable=set(event_to_room_id.values()),
            retcols=("event_id", "type", "state_key"),
        )
        for row in rows:
            event_id = row["event_id"]
            event_type = row["type"]
            state_key = row["state_key"]

            # (We could pull out the auth events for all rows at once using
            # simple_select_many, but this case happens rarely and almost always
            # with a single row.)
            auth_events = db_pool.simple_select_onecol_txn(
                txn,
                "event_auth",
                keyvalues={"event_id": event_id},
                retcol="auth_id",
            )

            events_to_calc_chain_id_for.add(event_id)
            event_to_types[event_id] = (event_type, state_key)
            event_to_auth_chain[event_id] = auth_events

        # First we get the chain ID and sequence numbers for the events'
        # auth events (that aren't also currently being persisted).
        #
        # Note that there is an edge case here where we might not have
        # calculated chains and sequence numbers for events that were "out
        # of band". We handle this case by fetching the necessary info and
        # adding it to the set of events to calculate chain IDs for.

        missing_auth_chains = {
            a_id
            for auth_events in event_to_auth_chain.values()
            for a_id in auth_events
            if a_id not in events_to_calc_chain_id_for
        }

        # We loop here in case we find an out of band membership and need to
        # fetch their auth event info.
        while missing_auth_chains:
            sql = """
                SELECT event_id, events.type, state_key, chain_id, sequence_number
                FROM events
                INNER JOIN state_events USING (event_id)
                LEFT JOIN event_auth_chains USING (event_id)
                WHERE
            """
            clause, args = make_in_list_sql_clause(
                txn.database_engine,
                "event_id",
                missing_auth_chains,
            )
            txn.execute(sql + clause, args)

            missing_auth_chains.clear()

            for (
                auth_id,
                event_type,
                state_key,
                chain_id,
                sequence_number,
            ) in txn.fetchall():
                event_to_types[auth_id] = (event_type, state_key)

                if chain_id is None:
                    # No chain ID, so the event was persisted out of band.
                    # We add to list of events to calculate auth chains for.

                    events_to_calc_chain_id_for.add(auth_id)

                    event_to_auth_chain[auth_id] = db_pool.simple_select_onecol_txn(
                        txn,
                        "event_auth",
                        keyvalues={"event_id": auth_id},
                        retcol="auth_id",
                    )

                    missing_auth_chains.update(
                        e
                        for e in event_to_auth_chain[auth_id]
                        if e not in event_to_types
                    )
                else:
                    chain_map[auth_id] = (chain_id, sequence_number)

        # Now we check if we have any events where we don't have auth chain,
        # this should only be out of band memberships.
        for event_id in sorted_topologically(event_to_auth_chain, event_to_auth_chain):
            for auth_id in event_to_auth_chain[event_id]:
                if (
                    auth_id not in chain_map
                    and auth_id not in events_to_calc_chain_id_for
                ):
                    events_to_calc_chain_id_for.discard(event_id)

                    # If this is an event we're trying to persist we add it to
                    # the list of events to calculate chain IDs for next time
                    # around. (Otherwise we will have already added it to the
                    # table).
                    room_id = event_to_room_id.get(event_id)
                    if room_id:
                        e_type, state_key = event_to_types[event_id]
                        db_pool.simple_insert_txn(
                            txn,
                            table="event_auth_chain_to_calculate",
                            values={
                                "event_id": event_id,
                                "room_id": room_id,
                                "type": e_type,
                                "state_key": state_key,
                            },
                        )

                    # We stop checking the event's auth events since we've
                    # discarded it.
                    break

        if not events_to_calc_chain_id_for:
            return

        # Allocate chain ID/sequence numbers to each new event.
        new_chain_tuples = cls._allocate_chain_ids(
            txn,
            db_pool,
            event_chain_id_gen,
            event_to_room_id,
            event_to_types,
            event_to_auth_chain,
            events_to_calc_chain_id_for,
            chain_map,
        )
        chain_map.update(new_chain_tuples)

        db_pool.simple_insert_many_txn(
            txn,
            table="event_auth_chains",
            values=[
                {"event_id": event_id, "chain_id": c_id, "sequence_number": seq}
                for event_id, (c_id, seq) in new_chain_tuples.items()
            ],
        )

        db_pool.simple_delete_many_txn(
            txn,
            table="event_auth_chain_to_calculate",
            keyvalues={},
            column="event_id",
            iterable=new_chain_tuples,
        )

        # Now we need to calculate any new links between chains caused by
        # the new events.
        #
        # Links are pairs of chain ID/sequence numbers such that for any
        # event A (CA, SA) and any event B (CB, SB), B is in A's auth chain
        # if and only if there is at least one link (CA, S1) -> (CB, S2)
        # where SA >= S1 and S2 >= SB.
        #
        # We try and avoid adding redundant links to the table, e.g. if we
        # have two links between two chains which both start/end at the
        # same sequence number (or cross) then one can be safely dropped.
        #
        # To calculate new links we look at every new event and:
        #   1. Fetch the chain ID/sequence numbers of its auth events,
        #      discarding any that are reachable by other auth events, or
        #      that have the same chain ID as the event.
        #   2. For each retained auth event we:
        #       a. Add a link from the event's to the auth event's chain
        #          ID/sequence number; and
        #       b. Add a link from the event to every chain reachable by the
        #          auth event.
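        #
        # For example (illustrative numbers only): if event A is at (1, 5)
        # and event B is at (2, 3), then a stored link (1, 4) -> (2, 3) is
        # enough to show that B is in A's auth chain, since 5 >= 4 and
        # 3 >= 3. A further link (1, 2) -> (2, 1) would be redundant for A,
        # though it may still be needed by events earlier on chain 1.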

        # Step 1, fetch all existing links from all the chains we've seen
        # referenced.
        chain_links = _LinkMap()
        rows = db_pool.simple_select_many_txn(
            txn,
            table="event_auth_chain_links",
            column="origin_chain_id",
            iterable={chain_id for chain_id, _ in chain_map.values()},
            keyvalues={},
            retcols=(
                "origin_chain_id",
                "origin_sequence_number",
                "target_chain_id",
                "target_sequence_number",
            ),
        )
        for row in rows:
            chain_links.add_link(
                (row["origin_chain_id"], row["origin_sequence_number"]),
                (row["target_chain_id"], row["target_sequence_number"]),
                new=False,
            )

        # We do this in topological order to avoid adding redundant links.
        for event_id in sorted_topologically(
            events_to_calc_chain_id_for, event_to_auth_chain
        ):
            chain_id, sequence_number = chain_map[event_id]

            # Filter out auth events that are reachable by other auth
            # events. We do this by looking at every permutation of pairs of
            # auth events (A, B) to check if B is reachable from A.
            reduction = {
                a_id
                for a_id in event_to_auth_chain.get(event_id, [])
                if chain_map[a_id][0] != chain_id
            }
            for start_auth_id, end_auth_id in itertools.permutations(
                event_to_auth_chain.get(event_id, []),
                r=2,
            ):
                if chain_links.exists_path_from(
                    chain_map[start_auth_id], chain_map[end_auth_id]
                ):
                    reduction.discard(end_auth_id)

            # Step 2, figure out what the new links are from the reduced
            # list of auth events.
            for auth_id in reduction:
                auth_chain_id, auth_sequence_number = chain_map[auth_id]

                # Step 2a, add link between the event and auth event
                chain_links.add_link(
                    (chain_id, sequence_number), (auth_chain_id, auth_sequence_number)
                )

                # Step 2b, add a link to chains reachable from the auth
                # event.
                for target_id, target_seq in chain_links.get_links_from(
                    (auth_chain_id, auth_sequence_number)
                ):
                    if target_id == chain_id:
                        continue

                    chain_links.add_link(
                        (chain_id, sequence_number), (target_id, target_seq)
                    )

        db_pool.simple_insert_many_txn(
            txn,
            table="event_auth_chain_links",
            values=[
                {
                    "origin_chain_id": source_id,
                    "origin_sequence_number": source_seq,
                    "target_chain_id": target_id,
                    "target_sequence_number": target_seq,
                }
                for (
                    source_id,
                    source_seq,
                    target_id,
                    target_seq,
                ) in chain_links.get_additions()
            ],
        )

    @staticmethod
    def _allocate_chain_ids(
        txn,
        db_pool: DatabasePool,
        event_chain_id_gen: SequenceGenerator,
        event_to_room_id: Dict[str, str],
        event_to_types: Dict[str, Tuple[str, str]],
        event_to_auth_chain: Dict[str, List[str]],
        events_to_calc_chain_id_for: Set[str],
        chain_map: Dict[str, Tuple[int, int]],
    ) -> Dict[str, Tuple[int, int]]:
        """Allocates, but does not persist, chain ID/sequence numbers for the
        events in `events_to_calc_chain_id_for`. (c.f. _add_chain_cover_index
        for info on args)
        """

        # We now calculate the chain IDs/sequence numbers for the events. We do
        # this by looking at the chain ID and sequence number of any auth event
        # with the same type/state_key and incrementing the sequence number by
        # one. If there was no match or the chain ID/sequence number is already
        # taken we generate a new chain.
        #
        # We try to reduce the number of times that we hit the database by
        # batching up calls, to make this more efficient when persisting large
        # numbers of state events (e.g. during joins).
        #
        # We do this by:
        #   1. Calculating for each event which auth event will be used to
        #      inherit the chain ID, i.e. converting the auth chain graph to a
        #      tree that we can allocate chains on. We also keep track of which
        #      existing chain IDs have been referenced.
        #   2. Fetching the max allocated sequence number for each referenced
        #      existing chain ID, generating a map from chain ID to the max
        #      allocated sequence number.
        #   3. Iterating over the tree and allocating a chain ID/seq no. to the
        #      new event, by incrementing the sequence number from the
        #      referenced event's chain ID/seq no. and checking that the
        #      incremented sequence number hasn't already been allocated (by
        #      looking in the map generated in the previous step). We generate a
        #      new chain if the sequence number has already been allocated.
        #
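        # As an illustration (made-up numbers): if a new m.room.power_levels
        # event has an auth event of the same type/state_key already at
        # chain 7, sequence 4, and nothing else has claimed (7, 5), the new
        # event is allocated (7, 5); otherwise it starts a brand new chain
        # at sequence 1.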

        existing_chains: Set[int] = set()
        tree: List[Tuple[str, Optional[str]]] = []

        # We need to do this in a topologically sorted order as we want to
        # generate chain IDs/sequence numbers of an event's auth events before
        # the event itself.
        for event_id in sorted_topologically(
            events_to_calc_chain_id_for, event_to_auth_chain
        ):
            for auth_id in event_to_auth_chain.get(event_id, []):
                if event_to_types.get(event_id) == event_to_types.get(auth_id):
                    existing_chain_id = chain_map.get(auth_id)
                    if existing_chain_id:
                        existing_chains.add(existing_chain_id[0])

                    tree.append((event_id, auth_id))
                    break
            else:
                tree.append((event_id, None))

        # Fetch the current max sequence number for each existing referenced chain.
        sql = """
            SELECT chain_id, MAX(sequence_number) FROM event_auth_chains
            WHERE %s
            GROUP BY chain_id
        """
        clause, args = make_in_list_sql_clause(
            db_pool.engine, "chain_id", existing_chains
        )
        txn.execute(sql % (clause,), args)

        chain_to_max_seq_no: Dict[Any, int] = {row[0]: row[1] for row in txn}

        # Allocate the new events' chain ID/sequence numbers.
        #
        # To reduce the number of calls to the database we don't allocate a
        # chain ID number in the loop, instead we use a temporary `object()` for
        # each new chain ID. Once we've done the loop we generate the necessary
        # number of new chain IDs in one call, replacing all temporary
        # objects with real allocated chain IDs.

        unallocated_chain_ids: Set[object] = set()
        new_chain_tuples: Dict[str, Tuple[Any, int]] = {}
        for event_id, auth_event_id in tree:
            # If we reference an auth_event_id we fetch the allocated chain ID,
            # either from the existing `chain_map` or the newly generated
            # `new_chain_tuples` map.
            existing_chain_id = None
            if auth_event_id:
                existing_chain_id = new_chain_tuples.get(auth_event_id)
                if not existing_chain_id:
                    existing_chain_id = chain_map[auth_event_id]

            new_chain_tuple: Optional[Tuple[Any, int]] = None
            if existing_chain_id:
                # We found a chain ID/sequence number candidate, check it's
                # not already taken.
                proposed_new_id = existing_chain_id[0]
                proposed_new_seq = existing_chain_id[1] + 1

                if chain_to_max_seq_no[proposed_new_id] < proposed_new_seq:
                    new_chain_tuple = (
                        proposed_new_id,
                        proposed_new_seq,
                    )

            # If we need to start a new chain we allocate a temporary chain ID.
            if not new_chain_tuple:
                new_chain_tuple = (object(), 1)
                unallocated_chain_ids.add(new_chain_tuple[0])

            new_chain_tuples[event_id] = new_chain_tuple
            chain_to_max_seq_no[new_chain_tuple[0]] = new_chain_tuple[1]

        # Generate new chain IDs for all unallocated chain IDs.
        newly_allocated_chain_ids = event_chain_id_gen.get_next_mult_txn(
            txn, len(unallocated_chain_ids)
        )

        # Map from potentially temporary chain ID to real chain ID
        chain_id_to_allocated_map: Dict[Any, int] = dict(
            zip(unallocated_chain_ids, newly_allocated_chain_ids)
        )
        chain_id_to_allocated_map.update((c, c) for c in existing_chains)

        return {
            event_id: (chain_id_to_allocated_map[chain_id], seq)
            for event_id, (chain_id, seq) in new_chain_tuples.items()
        }

    def _persist_transaction_ids_txn(
        self,
        txn: LoggingTransaction,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
    ):
        """Persist the mapping from transaction IDs to event IDs (if defined)."""

        to_insert = []
        for event, _ in events_and_contexts:
            token_id = getattr(event.internal_metadata, "token_id", None)
            txn_id = getattr(event.internal_metadata, "txn_id", None)
            if token_id and txn_id:
                to_insert.append(
                    {
                        "event_id": event.event_id,
                        "room_id": event.room_id,
                        "user_id": event.sender,
                        "token_id": token_id,
                        "txn_id": txn_id,
                        "inserted_ts": self._clock.time_msec(),
                    }
                )

        if to_insert:
            self.db_pool.simple_insert_many_txn(
                txn,
                table="event_txn_id",
                values=to_insert,
            )

    def _update_current_state_txn(
        self,
        txn: LoggingTransaction,
        state_delta_by_room: Dict[str, DeltaState],
        stream_id: int,
    ):
        for room_id, delta_state in state_delta_by_room.items():
            to_delete = delta_state.to_delete
            to_insert = delta_state.to_insert

            if delta_state.no_longer_in_room:
                # Server is no longer in the room so we delete the room from
                # current_state_events, being careful we've already updated the
                # rooms.room_version column (which gets populated in a
                # background task).
                self._upsert_room_version_txn(txn, room_id)

                # Before deleting we populate the current_state_delta_stream
                # so that async background tasks get told what happened.
                sql = """
                    INSERT INTO current_state_delta_stream
                        (stream_id, instance_name, room_id, type, state_key, event_id, prev_event_id)
                    SELECT ?, ?, room_id, type, state_key, null, event_id
                        FROM current_state_events
                        WHERE room_id = ?
                """
                txn.execute(sql, (stream_id, self._instance_name, room_id))

                self.db_pool.simple_delete_txn(
                    txn,
                    table="current_state_events",
                    keyvalues={"room_id": room_id},
                )
            else:
                # We're still in the room, so we update the current state as normal.

                # First we add entries to the current_state_delta_stream. We
                # do this before updating the current_state_events table so
                # that we can use it to calculate the `prev_event_id`. (This
                # allows us to not have to pull out the existing state
                # unnecessarily).
                #
                # The stream_id for the update is chosen to be the minimum of the stream_ids
                # for the batch of the events that we are persisting; that means we do not
                # end up in a situation where workers see events before the
                # current_state_delta updates.
                #
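                # For instance (illustrative numbers), if the batch spans
                # stream orderings 10..15, the delta rows here are stamped
                # with 10, so no event in the batch can appear in a worker's
                # stream ahead of its corresponding current_state_delta row.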
|
|
|
|
sql = """
|
|
|
|
INSERT INTO current_state_delta_stream
|
2020-10-09 08:10:33 -04:00
|
|
|
(stream_id, instance_name, room_id, type, state_key, event_id, prev_event_id)
|
|
|
|
SELECT ?, ?, ?, ?, ?, ?, (
|
2020-01-29 06:01:32 -05:00
|
|
|
SELECT event_id FROM current_state_events
|
|
|
|
WHERE room_id = ? AND type = ? AND state_key = ?
|
2019-03-28 09:37:16 -04:00
|
|
|
)
|
2020-01-29 06:01:32 -05:00
|
|
|
"""
|
2021-01-21 05:22:53 -05:00
|
|
|
txn.execute_batch(
|
2020-01-29 06:01:32 -05:00
|
|
|
sql,
|
2019-03-28 09:37:16 -04:00
|
|
|
(
|
2020-01-29 06:01:32 -05:00
|
|
|
(
|
|
|
|
stream_id,
|
2020-10-09 08:10:33 -04:00
|
|
|
self._instance_name,
|
2020-01-29 06:01:32 -05:00
|
|
|
room_id,
|
|
|
|
etype,
|
|
|
|
state_key,
|
|
|
|
to_insert.get((etype, state_key)),
|
|
|
|
room_id,
|
|
|
|
etype,
|
|
|
|
state_key,
|
|
|
|
)
|
|
|
|
for etype, state_key in itertools.chain(to_delete, to_insert)
|
|
|
|
),
|
|
|
|
)
|
|
|
|
# Now we actually update the current_state_events table
|
2019-01-30 05:53:17 -05:00
|
|
|
|
2021-01-21 05:22:53 -05:00
|
|
|
txn.execute_batch(
|
2020-01-29 06:01:32 -05:00
|
|
|
"DELETE FROM current_state_events"
|
|
|
|
" WHERE room_id = ? AND type = ? AND state_key = ?",
|
|
|
|
(
|
|
|
|
(room_id, etype, state_key)
|
|
|
|
for etype, state_key in itertools.chain(to_delete, to_insert)
|
|
|
|
),
|
|
|
|
)
|
2019-01-30 05:53:17 -05:00
|
|
|
|
2020-01-29 06:01:32 -05:00
|
|
|
# We include the membership in the current state table, hence we do
|
|
|
|
# a lookup when we insert. This assumes that all events have already
|
|
|
|
# been inserted into room_memberships.
|
2021-01-21 05:22:53 -05:00
|
|
|
txn.execute_batch(
|
2020-01-29 06:01:32 -05:00
|
|
|
"""INSERT INTO current_state_events
|
|
|
|
(room_id, type, state_key, event_id, membership)
|
|
|
|
VALUES (?, ?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?))
|
|
|
|
""",
|
|
|
|
[
|
|
|
|
(room_id, key[0], key[1], ev_id, ev_id)
|
2020-06-15 07:03:36 -04:00
|
|
|
for key, ev_id in to_insert.items()
|
2020-01-29 06:01:32 -05:00
|
|
|
],
|
|
|
|
)
|
2017-01-20 10:40:04 -05:00
|
|
|
|
2020-01-29 06:01:32 -05:00
|
|
|
# We now update `local_current_membership`. We do this regardless
|
|
|
|
# of whether we're still in the room or not to handle the case where
|
|
|
|
# e.g. we just got banned (where we need to record that fact here).
|
2017-01-20 10:40:04 -05:00
|
|
|
|
2020-01-15 09:59:33 -05:00
|
|
|
# Note: Do we really want to delete rows here (that we do not
|
|
|
|
# subsequently reinsert below)? While technically correct it means
|
|
|
|
# we have no record of the fact the user *was* a member of the
|
|
|
|
# room but got, say, state reset out of it.
|
|
|
|
if to_delete or to_insert:
|
2021-01-21 05:22:53 -05:00
|
|
|
txn.execute_batch(
|
2020-01-15 09:59:33 -05:00
|
|
|
"DELETE FROM local_current_membership"
|
|
|
|
" WHERE room_id = ? AND user_id = ?",
|
|
|
|
(
|
|
|
|
(room_id, state_key)
|
|
|
|
for etype, state_key in itertools.chain(to_delete, to_insert)
|
|
|
|
if etype == EventTypes.Member and self.is_mine_id(state_key)
|
|
|
|
),
|
|
|
|
)
|
|
|
|
|
|
|
|
if to_insert:
|
2021-01-21 05:22:53 -05:00
|
|
|
txn.execute_batch(
|
2020-01-15 09:59:33 -05:00
|
|
|
"""INSERT INTO local_current_membership
|
|
|
|
(room_id, user_id, event_id, membership)
|
|
|
|
VALUES (?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?))
|
|
|
|
""",
|
|
|
|
[
|
|
|
|
(room_id, key[1], ev_id, ev_id)
|
|
|
|
for key, ev_id in to_insert.items()
|
|
|
|
if key[0] == EventTypes.Member and self.is_mine_id(key[1])
|
|
|
|
],
|
|
|
|
)
|
|
|
|
|
2019-01-30 05:53:17 -05:00
|
|
|
txn.call_after(
|
2020-05-13 08:38:22 -04:00
|
|
|
self.store._curr_state_delta_stream_cache.entity_has_changed,
|
2019-03-28 09:37:16 -04:00
|
|
|
room_id,
|
2019-04-02 07:42:39 -04:00
|
|
|
stream_id,
|
2019-01-30 05:53:17 -05:00
|
|
|
)
|
2017-06-13 04:56:18 -04:00
|
|
|
|
2019-01-30 05:53:17 -05:00
|
|
|
# Invalidate the various caches
|
|
|
|
|
|
|
|
# Figure out the changes of membership to invalidate the
|
|
|
|
# `get_rooms_for_user` cache.
|
|
|
|
# We find out which membership events we may have deleted
|
2021-02-12 11:01:48 -05:00
|
|
|
# and which we have added, then we invalidate the caches for all
|
2019-01-30 05:53:17 -05:00
|
|
|
# those users.
|
2020-02-21 07:15:07 -05:00
|
|
|
members_changed = {
|
2019-01-30 05:53:17 -05:00
|
|
|
state_key
|
|
|
|
for ev_type, state_key in itertools.chain(to_delete, to_insert)
|
|
|
|
if ev_type == EventTypes.Member
|
2020-02-21 07:15:07 -05:00
|
|
|
}
|
2019-01-30 05:53:17 -05:00
|
|
|
|
2019-04-02 07:42:39 -04:00
|
|
|
for member in members_changed:
|
|
|
|
txn.call_after(
|
2020-05-13 08:38:22 -04:00
|
|
|
self.store.get_rooms_for_user_with_stream_ordering.invalidate,
|
|
|
|
(member,),
|
2019-04-02 07:42:39 -04:00
|
|
|
)
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
self.store._invalidate_state_caches_and_stream(
|
|
|
|
txn, room_id, members_changed
|
|
|
|
)
|
2019-01-30 05:53:17 -05:00
|
|
|
|
2020-01-29 06:01:32 -05:00
|
|
|
def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str):
|
|
|
|
"""Update the room version in the database based off current state
|
|
|
|
events.
|
|
|
|
|
|
|
|
This is used when we're about to delete current state and we want to
|
|
|
|
ensure that the `rooms.room_version` column is up to date.
|
|
|
|
"""
|
|
|
|
|
|
|
|
sql = """
|
|
|
|
SELECT json FROM event_json
|
|
|
|
INNER JOIN current_state_events USING (room_id, event_id)
|
|
|
|
WHERE room_id = ? AND type = ? AND state_key = ?
|
|
|
|
"""
|
|
|
|
txn.execute(sql, (room_id, EventTypes.Create, ""))
|
|
|
|
row = txn.fetchone()
|
|
|
|
if row:
|
2020-07-16 11:32:19 -04:00
|
|
|
event_json = db_to_json(row[0])
|
2020-01-29 06:01:32 -05:00
|
|
|
content = event_json.get("content", {})
|
|
|
|
creator = content.get("creator")
|
|
|
|
room_version_id = content.get("room_version", RoomVersions.V1.identifier)
|
|
|
|
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_upsert_txn(
|
2020-01-29 06:01:32 -05:00
|
|
|
txn,
|
|
|
|
table="rooms",
|
|
|
|
keyvalues={"room_id": room_id},
|
|
|
|
values={"room_version": room_version_id},
|
|
|
|
insertion_values={"is_public": False, "creator": creator},
|
|
|
|
)
|
|
|
|
|
2019-03-28 09:37:16 -04:00
|
|
|
def _update_forward_extremities_txn(
|
|
|
|
self, txn, new_forward_extremities, max_stream_order
|
|
|
|
):
|
2021-04-20 06:50:49 -04:00
|
|
|
for room_id in new_forward_extremities.keys():
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_delete_txn(
|
2019-03-28 09:37:16 -04:00
|
|
|
txn, table="event_forward_extremities", keyvalues={"room_id": room_id}
|
2017-01-20 09:28:53 -05:00
|
|
|
)
|
2020-05-13 08:38:22 -04:00
|
|
|
txn.call_after(
|
|
|
|
self.store.get_latest_event_ids_in_room.invalidate, (room_id,)
|
|
|
|
)
|
2017-01-20 09:28:53 -05:00
|
|
|
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_insert_many_txn(
|
2017-01-20 09:28:53 -05:00
|
|
|
txn,
|
|
|
|
table="event_forward_extremities",
|
|
|
|
values=[
|
2019-03-28 09:37:16 -04:00
|
|
|
{"event_id": ev_id, "room_id": room_id}
|
2020-06-15 07:03:36 -04:00
|
|
|
for room_id, new_extrem in new_forward_extremities.items()
|
2017-01-20 09:28:53 -05:00
|
|
|
for ev_id in new_extrem
|
|
|
|
],
|
|
|
|
)
|
|
|
|
# We now insert into stream_ordering_to_exterm a mapping from room_id,
|
|
|
|
# new stream_ordering to new forward extremeties in the room.
|
|
|
|
# This allows us to later efficiently look up the forward extremeties
|
|
|
|
# for a room before a given stream_ordering
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_insert_many_txn(
|
2017-01-20 09:28:53 -05:00
|
|
|
txn,
|
|
|
|
table="stream_ordering_to_exterm",
|
|
|
|
values=[
|
|
|
|
{
|
|
|
|
"room_id": room_id,
|
|
|
|
"event_id": event_id,
|
|
|
|
"stream_ordering": max_stream_order,
|
|
|
|
}
|
2020-06-15 07:03:36 -04:00
|
|
|
for room_id, new_extrem in new_forward_extremities.items()
|
2017-01-20 09:28:53 -05:00
|
|
|
for event_id in new_extrem
|
2019-03-28 09:37:16 -04:00
|
|
|
],
|
2017-01-20 09:28:53 -05:00
|
|
|
)
|
|
|
|
|
2017-03-17 07:51:13 -04:00
|
|
|
@classmethod
|
2020-09-11 07:22:55 -04:00
|
|
|
def _filter_events_and_contexts_for_duplicates(
|
|
|
|
cls, events_and_contexts: List[Tuple[EventBase, EventContext]]
|
|
|
|
) -> List[Tuple[EventBase, EventContext]]:
|
2017-03-17 07:51:13 -04:00
|
|
|
"""Ensure that we don't have the same event twice.
|
|
|
|
|
|
|
|
Pick the earliest non-outlier if there is one, else the earliest one.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
events_and_contexts (list[(EventBase, EventContext)]):
|
|
|
|
Returns:
|
|
|
|
list[(EventBase, EventContext)]: filtered list
|
|
|
|
"""
|
2021-07-15 12:46:54 -04:00
|
|
|
new_events_and_contexts: OrderedDict[
|
|
|
|
str, Tuple[EventBase, EventContext]
|
|
|
|
] = OrderedDict()
|
2016-08-03 06:23:39 -04:00
|
|
|
for event, context in events_and_contexts:
|
|
|
|
prev_event_context = new_events_and_contexts.get(event.event_id)
|
|
|
|
if prev_event_context:
|
|
|
|
if not event.internal_metadata.is_outlier():
|
|
|
|
if prev_event_context[0].internal_metadata.is_outlier():
|
|
|
|
# To ensure correct ordering we pop, as OrderedDict is
|
|
|
|
# ordered by first insertion.
|
|
|
|
new_events_and_contexts.pop(event.event_id, None)
|
|
|
|
new_events_and_contexts[event.event_id] = (event, context)
|
|
|
|
else:
|
|
|
|
new_events_and_contexts[event.event_id] = (event, context)
|
2018-05-31 05:03:47 -04:00
|
|
|
return list(new_events_and_contexts.values())
|
2016-08-03 06:23:39 -04:00
|
|
|
|
2020-09-11 07:22:55 -04:00
|
|
|
def _update_room_depths_txn(
|
|
|
|
self,
|
|
|
|
txn,
|
|
|
|
events_and_contexts: List[Tuple[EventBase, EventContext]],
|
|
|
|
backfilled: bool,
|
|
|
|
):
|
2017-03-17 07:51:13 -04:00
|
|
|
"""Update min_depth for each room
|
2016-08-03 06:23:39 -04:00
|
|
|
|
2017-03-17 07:51:13 -04:00
|
|
|
Args:
|
|
|
|
txn (twisted.enterprise.adbapi.Connection): db connection
|
|
|
|
events_and_contexts (list[(EventBase, EventContext)]): events
|
|
|
|
we are persisting
|
|
|
|
backfilled (bool): True if the events were backfilled
|
|
|
|
"""
|
2021-07-15 12:46:54 -04:00
|
|
|
depth_updates: Dict[str, int] = {}
|
2016-02-09 11:19:15 -05:00
|
|
|
for event, context in events_and_contexts:
|
|
|
|
# Remove the any existing cache entries for the event_ids
|
2020-05-13 08:38:22 -04:00
|
|
|
txn.call_after(self.store._invalidate_get_event_cache, event.event_id)
|
2016-01-28 10:02:37 -05:00
|
|
|
if not backfilled:
|
|
|
|
txn.call_after(
|
2020-05-13 08:38:22 -04:00
|
|
|
self.store._events_stream_cache.entity_has_changed,
|
2019-03-28 09:37:16 -04:00
|
|
|
event.room_id,
|
|
|
|
event.internal_metadata.stream_ordering,
|
2016-01-28 10:02:37 -05:00
|
|
|
)
|
|
|
|
|
2016-07-25 13:44:30 -04:00
|
|
|
if not event.internal_metadata.is_outlier() and not context.rejected:
|
2016-02-09 11:19:15 -05:00
|
|
|
depth_updates[event.room_id] = max(
|
|
|
|
event.depth, depth_updates.get(event.room_id, event.depth)
|
|
|
|
)
|
|
|
|
|
2020-06-15 07:03:36 -04:00
|
|
|
for room_id, depth in depth_updates.items():
|
2015-06-25 12:18:19 -04:00
|
|
|
self._update_min_depth_for_room_txn(txn, room_id, depth)
|
|
|
|
|
2017-03-17 07:51:13 -04:00
|
|
|
def _update_outliers_txn(self, txn, events_and_contexts):
|
|
|
|
"""Update any outliers with new event info.
|
|
|
|
|
|
|
|
This turns outliers into ex-outliers (unless the new event was
|
|
|
|
rejected).
|
|
|
|
|
|
|
|
Args:
|
|
|
|
txn (twisted.enterprise.adbapi.Connection): db connection
|
|
|
|
events_and_contexts (list[(EventBase, EventContext)]): events
|
|
|
|
we are persisting
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
list[(EventBase, EventContext)] new list, without events which
|
|
|
|
are already in the events table.
|
|
|
|
"""
|
2015-06-25 12:18:19 -04:00
|
|
|
txn.execute(
|
2019-03-28 09:37:16 -04:00
|
|
|
"SELECT event_id, outlier FROM events WHERE event_id in (%s)"
|
|
|
|
% (",".join(["?"] * len(events_and_contexts)),),
|
|
|
|
[event.event_id for event, _ in events_and_contexts],
|
2015-03-20 09:52:56 -04:00
|
|
|
)
|
2016-07-25 13:44:30 -04:00
|
|
|
|
2019-03-28 09:37:16 -04:00
|
|
|
have_persisted = {event_id: outlier for event_id, outlier in txn}
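# have_persisted now maps each event_id that is already stored to its existing outlier flag.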
|
2015-06-25 12:18:19 -04:00
|
|
|
|
|
|
|
to_remove = set()
|
|
|
|
for event, context in events_and_contexts:
|
|
|
|
if event.event_id not in have_persisted:
|
|
|
|
continue
|
|
|
|
|
|
|
|
to_remove.add(event)
|
|
|
|
|
2017-03-17 07:51:13 -04:00
|
|
|
if context.rejected:
|
|
|
|
# If the event is rejected then we don't care if the event
|
|
|
|
# was an outlier or not.
|
|
|
|
continue
|
|
|
|
|
2015-06-25 12:18:19 -04:00
|
|
|
outlier_persisted = have_persisted[event.event_id]
|
|
|
|
if not event.internal_metadata.is_outlier() and outlier_persisted:
|
2016-07-26 05:49:52 -04:00
|
|
|
# We received a copy of an event that we had already stored as
|
|
|
|
# an outlier in the database. We now have some state at that event
|
|
|
|
# so we need to update the state_groups table with that state.
|
|
|
|
|
2018-02-06 09:31:24 -05:00
|
|
|
# insert into event_to_state_groups.
|
2016-09-02 05:41:38 -04:00
|
|
|
try:
|
2018-02-06 09:31:24 -05:00
|
|
|
self._store_event_state_mappings_txn(txn, ((event, context),))
|
2016-09-02 05:41:38 -04:00
|
|
|
except Exception:
|
|
|
|
logger.exception("")
|
|
|
|
raise
|
2015-03-20 09:52:56 -04:00
|
|
|
|
2021-03-17 08:33:18 -04:00
|
|
|
# update the stored internal_metadata to update the "outlier" flag.
|
|
|
|
# TODO: This is unused as of Synapse 1.31. Remove it once we are happy
|
|
|
|
# to drop backwards-compatibility with 1.30.
|
2020-10-28 11:51:15 -04:00
|
|
|
metadata_json = json_encoder.encode(event.internal_metadata.get_dict())
|
2019-11-21 07:00:14 -05:00
|
|
|
sql = "UPDATE event_json SET internal_metadata = ? WHERE event_id = ?"
|
2019-03-28 09:37:16 -04:00
|
|
|
txn.execute(sql, (metadata_json, event.event_id))
|
2015-03-20 09:52:56 -04:00
|
|
|
|
2016-07-26 05:49:52 -04:00
|
|
|
# Add an entry to the ex_outlier_stream table to replicate the
|
|
|
|
# change in outlier status to our workers.
|
2016-03-30 12:19:56 -04:00
|
|
|
stream_order = event.internal_metadata.stream_ordering
|
2016-08-31 05:09:46 -04:00
|
|
|
state_group_id = context.state_group
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_insert_txn(
|
2016-03-30 12:19:56 -04:00
|
|
|
txn,
|
|
|
|
table="ex_outlier_stream",
|
|
|
|
values={
|
|
|
|
"event_stream_ordering": stream_order,
|
|
|
|
"event_id": event.event_id,
|
|
|
|
"state_group": state_group_id,
|
2020-10-09 08:10:33 -04:00
|
|
|
"instance_name": self._instance_name,
|
2019-03-28 09:37:16 -04:00
|
|
|
},
|
2016-03-30 12:19:56 -04:00
|
|
|
)
|
|
|
|
|
2019-11-21 07:00:14 -05:00
|
|
|
sql = "UPDATE events SET outlier = ? WHERE event_id = ?"
|
2019-03-28 09:37:16 -04:00
|
|
|
txn.execute(sql, (False, event.event_id))
|
2016-07-25 13:44:30 -04:00
|
|
|
|
2016-07-26 05:49:52 -04:00
|
|
|
# Update the event_backward_extremities table now that this
|
|
|
|
# event isn't an outlier any more.
|
2017-01-20 09:40:31 -05:00
|
|
|
self._update_backward_extremeties(txn, [event])
|
2015-09-15 11:34:42 -04:00
|
|
|
|
2019-03-28 09:37:16 -04:00
|
|
|
return [ec for ec in events_and_contexts if ec[0] not in to_remove]
|
2015-03-24 12:20:26 -04:00
|
|
|
|
2017-03-17 07:51:13 -04:00
|
|
|
def _store_event_txn(self, txn, events_and_contexts):
|
2021-01-11 08:57:33 -05:00
|
|
|
"""Insert new events into the event, event_json, redaction and
|
|
|
|
state_events tables.
|
2017-03-17 07:51:13 -04:00
|
|
|
|
|
|
|
Args:
|
|
|
|
txn (twisted.enterprise.adbapi.Connection): db connection
|
|
|
|
events_and_contexts (list[(EventBase, EventContext)]): events
|
|
|
|
we are persisting
|
|
|
|
"""
|
|
|
|
|
|
|
|
if not events_and_contexts:
|
|
|
|
# nothing to do here
|
|
|
|
return
|
|
|
|
|
|
|
|
def event_dict(event):
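# Serialise the event for storage, dropping the fields that track whether
# it has been redacted; only the canonical event JSON should be stored.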
|
2017-03-24 06:57:02 -04:00
|
|
|
d = event.get_dict()
|
|
|
|
d.pop("redacted", None)
|
|
|
|
d.pop("redacted_because", None)
|
|
|
|
return d
|
2016-08-04 10:02:15 -04:00
|
|
|
|
2021-03-17 08:33:18 -04:00
|
|
|
def get_internal_metadata(event):
|
|
|
|
im = event.internal_metadata.get_dict()
|
|
|
|
|
|
|
|
# temporary hack for database compatibility with Synapse 1.30 and earlier:
|
|
|
|
# store the `outlier` flag inside the internal_metadata json as well as in
|
|
|
|
# the `events` table, so that if anyone rolls back to an older Synapse,
|
|
|
|
# things keep working. This can be removed once we are happy to drop support
|
|
|
|
# for that
|
|
|
|
if event.internal_metadata.is_outlier():
|
|
|
|
im["outlier"] = True
|
|
|
|
|
|
|
|
return im
|
|
|
|
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_insert_many_txn(
|
2015-06-25 12:18:19 -04:00
|
|
|
txn,
|
|
|
|
table="event_json",
|
|
|
|
values=[
|
|
|
|
{
|
|
|
|
"event_id": event.event_id,
|
|
|
|
"room_id": event.room_id,
|
2020-10-28 11:51:15 -04:00
|
|
|
"internal_metadata": json_encoder.encode(
|
2021-03-17 08:33:18 -04:00
|
|
|
get_internal_metadata(event)
|
2018-08-01 10:54:06 -04:00
|
|
|
),
|
2020-10-28 11:51:15 -04:00
|
|
|
"json": json_encoder.encode(event_dict(event)),
|
2019-01-23 06:11:52 -05:00
|
|
|
"format_version": event.format_version,
|
2015-06-25 12:18:19 -04:00
|
|
|
}
|
|
|
|
for event, _ in events_and_contexts
|
|
|
|
],
|
2015-04-15 05:24:24 -04:00
|
|
|
)
|
2015-03-20 09:52:56 -04:00
|
|
|
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_insert_many_txn(
|
2015-06-25 12:18:19 -04:00
|
|
|
txn,
|
|
|
|
table="events",
|
|
|
|
values=[
|
|
|
|
{
|
2020-09-14 05:16:41 -04:00
|
|
|
"instance_name": self._instance_name,
|
2015-06-25 12:18:19 -04:00
|
|
|
"stream_ordering": event.internal_metadata.stream_ordering,
|
|
|
|
"topological_ordering": event.depth,
|
|
|
|
"depth": event.depth,
|
|
|
|
"event_id": event.event_id,
|
|
|
|
"room_id": event.room_id,
|
|
|
|
"type": event.type,
|
|
|
|
"processed": True,
|
|
|
|
"outlier": event.internal_metadata.is_outlier(),
|
2015-11-30 12:45:31 -05:00
|
|
|
"origin_server_ts": int(event.origin_server_ts),
|
2016-04-19 09:24:36 -04:00
|
|
|
"received_ts": self._clock.time_msec(),
|
2016-07-14 10:15:22 -04:00
|
|
|
"sender": event.sender,
|
|
|
|
"contains_url": (
|
2020-06-16 08:51:47 -04:00
|
|
|
"url" in event.content and isinstance(event.content["url"], str)
|
2016-07-14 10:15:22 -04:00
|
|
|
),
|
2015-06-25 12:18:19 -04:00
|
|
|
}
|
2020-08-06 12:15:35 -04:00
|
|
|
for event, _ in events_and_contexts
|
2015-06-25 12:18:19 -04:00
|
|
|
],
|
2015-03-20 09:52:56 -04:00
|
|
|
)
|
|
|
|
|
2021-04-16 09:44:55 -04:00
|
|
|
# If we're persisting an unredacted event we go and ensure
|
|
|
|
# that we mark any redactions that reference this event as
|
|
|
|
# requiring censoring.
|
|
|
|
sql = "UPDATE redactions SET have_censored = ? WHERE redacts = ?"
|
|
|
|
txn.execute_batch(
|
|
|
|
sql,
|
|
|
|
(
|
|
|
|
(
|
|
|
|
False,
|
|
|
|
event.event_id,
|
2019-10-01 06:05:48 -04:00
|
|
|
)
|
2021-04-16 09:44:55 -04:00
|
|
|
for event, _ in events_and_contexts
|
|
|
|
if not event.internal_metadata.is_redacted()
|
|
|
|
),
|
|
|
|
)
|
2019-10-01 06:05:48 -04:00
|
|
|
|
2021-01-11 08:57:33 -05:00
|
|
|
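# Pull out the state events so we can populate the state_events table below.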
state_events_and_contexts = [
|
|
|
|
ec for ec in events_and_contexts if ec[0].is_state()
|
|
|
|
]
|
|
|
|
|
|
|
|
state_values = []
|
2021-04-20 06:50:49 -04:00
|
|
|
for event, _ in state_events_and_contexts:
|
2021-01-11 08:57:33 -05:00
|
|
|
vals = {
|
|
|
|
"event_id": event.event_id,
|
|
|
|
"room_id": event.room_id,
|
|
|
|
"type": event.type,
|
|
|
|
"state_key": event.state_key,
|
|
|
|
}
|
|
|
|
|
|
|
|
# TODO: How does this work with backfilling?
|
|
|
|
if hasattr(event, "replaces_state"):
|
|
|
|
vals["prev_state"] = event.replaces_state
|
|
|
|
|
|
|
|
state_values.append(vals)
|
|
|
|
|
|
|
|
self.db_pool.simple_insert_many_txn(
|
|
|
|
txn, table="state_events", values=state_values
|
|
|
|
)
|
|
|
|
|
2017-03-17 07:51:13 -04:00
|
|
|
def _store_rejected_events_txn(self, txn, events_and_contexts):
|
|
|
|
"""Add rows to the 'rejections' table for received events which were
|
|
|
|
rejected
|
|
|
|
|
|
|
|
Args:
|
|
|
|
txn (twisted.enterprise.adbapi.Connection): db connection
|
|
|
|
events_and_contexts (list[(EventBase, EventContext)]): events
|
|
|
|
we are persisting
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
list[(EventBase, EventContext)] new list, without the rejected
|
|
|
|
events.
|
|
|
|
"""
|
2016-07-26 05:49:52 -04:00
|
|
|
# Remove the rejected events from the list now that we've added them
|
|
|
|
# to the events table and the event_json table.
|
2016-07-25 13:44:30 -04:00
|
|
|
to_remove = set()
|
2016-07-25 11:12:16 -04:00
|
|
|
for event, context in events_and_contexts:
|
|
|
|
if context.rejected:
|
2016-07-26 05:49:52 -04:00
|
|
|
# Insert the event_id into the rejections table
|
2019-03-28 09:37:16 -04:00
|
|
|
self._store_rejections_txn(txn, event.event_id, context.rejected)
|
2016-07-26 05:49:52 -04:00
|
|
|
to_remove.add(event)
|
2016-07-25 13:44:30 -04:00
|
|
|
|
2019-03-28 09:37:16 -04:00
|
|
|
return [ec for ec in events_and_contexts if ec[0] not in to_remove]
|
2016-07-25 13:44:30 -04:00
|
|
|
|
2019-03-28 09:37:16 -04:00
|
|
|
def _update_metadata_tables_txn(
|
|
|
|
self, txn, events_and_contexts, all_events_and_contexts, backfilled
|
|
|
|
):
|
2017-03-17 07:51:13 -04:00
|
|
|
"""Update all the miscellaneous tables for new events
|
|
|
|
|
|
|
|
Args:
|
|
|
|
txn (twisted.enterprise.adbapi.Connection): db connection
|
|
|
|
events_and_contexts (list[(EventBase, EventContext)]): events
|
|
|
|
we are persisting
|
2018-02-20 07:33:04 -05:00
|
|
|
all_events_and_contexts (list[(EventBase, EventContext)]): all
|
|
|
|
events that we were going to persist. This includes events
|
|
|
|
we've already persisted, etc, that wouldn't appear in
|
|
|
|
events_and_contexts.
|
2017-03-17 07:51:13 -04:00
|
|
|
backfilled (bool): True if the events were backfilled
|
|
|
|
"""
|
|
|
|
|
2018-02-20 07:29:50 -05:00
|
|
|
# Insert all the push actions into the event_push_actions table.
|
|
|
|
self._set_push_actions_for_event_and_users_txn(
|
|
|
|
txn,
|
|
|
|
events_and_contexts=events_and_contexts,
|
2018-02-20 07:33:04 -05:00
|
|
|
all_events_and_contexts=all_events_and_contexts,
|
2018-02-20 07:29:50 -05:00
|
|
|
)
|
|
|
|
|
2016-07-25 13:44:30 -04:00
|
|
|
if not events_and_contexts:
|
2017-03-17 07:51:13 -04:00
|
|
|
# nothing to do here
|
2016-07-25 13:44:30 -04:00
|
|
|
return
|
|
|
|
|
2021-04-20 06:50:49 -04:00
|
|
|
for event, _ in events_and_contexts:
|
2016-07-26 06:05:39 -04:00
|
|
|
if event.type == EventTypes.Redaction and event.redacts is not None:
|
|
|
|
# Remove the entries in the event_push_actions table for the
|
|
|
|
# redacted event.
|
|
|
|
self._remove_push_actions_for_event_id_txn(
|
|
|
|
txn, event.room_id, event.redacts
|
|
|
|
)
|
|
|
|
|
2019-05-20 05:13:05 -04:00
|
|
|
# Remove from relations table.
|
|
|
|
self._handle_redaction(txn, event.redacts)
|
|
|
|
|
2016-07-26 05:49:52 -04:00
|
|
|
# Update the event_forward_extremities, event_backward_extremities and
|
|
|
|
# event_edges tables.
|
2016-07-25 13:44:30 -04:00
|
|
|
self._handle_mult_prev_events(
|
2019-03-28 09:37:16 -04:00
|
|
|
txn, events=[event for event, _ in events_and_contexts]
|
2016-07-25 13:44:30 -04:00
|
|
|
)
|
|
|
|
|
|
|
|
for event, _ in events_and_contexts:
|
|
|
|
if event.type == EventTypes.Name:
|
2019-08-21 08:16:28 -04:00
|
|
|
# Insert into the event_search table.
|
2016-07-25 13:44:30 -04:00
|
|
|
self._store_room_name_txn(txn, event)
|
|
|
|
elif event.type == EventTypes.Topic:
|
2019-08-21 08:16:28 -04:00
|
|
|
# Insert into the event_search table.
|
2016-07-25 13:44:30 -04:00
|
|
|
self._store_room_topic_txn(txn, event)
|
|
|
|
elif event.type == EventTypes.Message:
|
2016-07-26 05:49:52 -04:00
|
|
|
# Insert into the event_search table.
|
2016-07-25 13:44:30 -04:00
|
|
|
self._store_room_message_txn(txn, event)
|
2020-01-23 10:19:03 -05:00
|
|
|
elif event.type == EventTypes.Redaction and event.redacts is not None:
|
2016-07-26 05:49:52 -04:00
|
|
|
# Insert into the redactions table.
|
2016-07-25 13:44:30 -04:00
|
|
|
self._store_redaction(txn, event)
|
2019-11-04 12:09:22 -05:00
|
|
|
elif event.type == EventTypes.Retention:
|
|
|
|
# Update the room_retention table.
|
|
|
|
self._store_retention_policy_for_room_txn(txn, event)
|
2016-07-25 13:44:30 -04:00
|
|
|
|
2019-05-14 11:59:21 -04:00
|
|
|
self._handle_event_relations(txn, event)
|
|
|
|
|
2021-07-28 11:46:37 -04:00
|
|
|
self._handle_insertion_event(txn, event)
|
|
|
|
self._handle_chunk_event(txn, event)
|
|
|
|
|
2019-10-29 14:35:49 -04:00
|
|
|
# Store the labels for this event.
|
2019-11-01 12:22:44 -04:00
|
|
|
labels = event.content.get(EventContentFields.LABELS)
|
2019-10-29 14:35:49 -04:00
|
|
|
if labels:
|
2019-11-01 07:47:28 -04:00
|
|
|
self.insert_labels_for_event_txn(
|
|
|
|
txn, event.event_id, labels, event.room_id, event.depth
|
|
|
|
)
|
2019-10-29 14:35:49 -04:00
|
|
|
|
2019-12-03 14:19:45 -05:00
|
|
|
if self._ephemeral_messages_enabled:
|
|
|
|
# If there's an expiry timestamp on the event, store it.
|
|
|
|
expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER)
|
|
|
|
if isinstance(expiry_ts, int) and not event.is_state():
|
|
|
|
self._insert_event_expiry_txn(txn, event.event_id, expiry_ts)
|
|
|
|
|
2016-07-26 05:49:52 -04:00
|
|
|
# Insert into the room_memberships table.
|
2016-07-25 13:44:30 -04:00
|
|
|
self._store_room_members_txn(
|
|
|
|
txn,
|
|
|
|
[
|
|
|
|
event
|
|
|
|
for event, _ in events_and_contexts
|
|
|
|
if event.type == EventTypes.Member
|
|
|
|
],
|
|
|
|
backfilled=backfilled,
|
|
|
|
)
|
|
|
|
|
2016-07-26 05:49:52 -04:00
|
|
|
# Insert event_reference_hashes table.
|
2015-06-25 12:18:19 -04:00
|
|
|
self._store_event_reference_hashes_txn(
|
|
|
|
txn, [event for event, _ in events_and_contexts]
|
|
|
|
)
|
|
|
|
|
2016-07-26 06:05:39 -04:00
|
|
|
# Prefill the event cache
|
2016-06-06 06:08:12 -04:00
|
|
|
self._add_to_cache(txn, events_and_contexts)
|
|
|
|
|
|
|
|
def _add_to_cache(self, txn, events_and_contexts):
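"""Prefill the get-event cache with the events we have just persisted.

Only events that were neither rejected nor redacted are added, and the
cache is populated via txn.call_after so that it only happens once the
transaction has committed.
"""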
|
|
|
|
to_prefill = []
|
|
|
|
|
|
|
|
rows = []
|
|
|
|
N = 200
|
|
|
|
for i in range(0, len(events_and_contexts), N):
|
2019-03-28 09:37:16 -04:00
|
|
|
ev_map = {e[0].event_id: e[0] for e in events_and_contexts[i : i + N]}
|
2016-06-06 06:08:12 -04:00
|
|
|
if not ev_map:
|
|
|
|
break
|
|
|
|
|
|
|
|
sql = (
|
|
|
|
"SELECT "
|
|
|
|
" e.event_id as event_id, "
|
|
|
|
" r.redacts as redacts,"
|
|
|
|
" rej.event_id as rejects "
|
|
|
|
" FROM events as e"
|
|
|
|
" LEFT JOIN rejections as rej USING (event_id)"
|
|
|
|
" LEFT JOIN redactions as r ON e.event_id = r.redacts"
|
2019-10-02 14:07:07 -04:00
|
|
|
" WHERE "
|
|
|
|
)
|
|
|
|
|
|
|
|
clause, args = make_in_list_sql_clause(
|
|
|
|
self.database_engine, "e.event_id", list(ev_map)
|
|
|
|
)
|
2016-06-06 06:08:12 -04:00
|
|
|
|
2019-10-02 14:07:07 -04:00
|
|
|
txn.execute(sql + clause, args)
|
2020-08-05 16:38:57 -04:00
|
|
|
rows = self.db_pool.cursor_to_dict(txn)
|
2016-06-06 06:08:12 -04:00
|
|
|
for row in rows:
|
|
|
|
event = ev_map[row["event_id"]]
|
|
|
|
if not row["rejects"] and not row["redacts"]:
|
2019-03-28 09:37:16 -04:00
|
|
|
to_prefill.append(
|
|
|
|
_EventCacheEntry(event=event, redacted_event=None)
|
|
|
|
)
|
2016-06-06 06:08:12 -04:00
|
|
|
|
|
|
|
def prefill():
|
|
|
|
for cache_entry in to_prefill:
|
2020-10-19 07:20:29 -04:00
|
|
|
self.store._get_event_cache.set((cache_entry[0].event_id,), cache_entry)
|
2019-03-28 09:37:16 -04:00
|
|
|
|
2016-06-06 06:08:12 -04:00
|
|
|
txn.call_after(prefill)
|
|
|
|
|
2015-03-20 09:52:56 -04:00
|
|
|
def _store_redaction(self, txn, event):
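"""Record the redaction in the redactions table and invalidate any cached
copy of the event it redacts."""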
|
|
|
|
# invalidate the cache for the redacted event
|
2020-05-13 08:38:22 -04:00
|
|
|
txn.call_after(self.store._invalidate_get_event_cache, event.redacts)
|
2019-10-01 08:23:34 -04:00
|
|
|
|
2021-07-09 06:03:02 -04:00
|
|
|
self.db_pool.simple_upsert_txn(
|
2019-10-01 08:23:34 -04:00
|
|
|
txn,
|
|
|
|
table="redactions",
|
2021-07-09 06:03:02 -04:00
|
|
|
keyvalues={"event_id": event.event_id},
|
2019-10-01 08:23:34 -04:00
|
|
|
values={
|
|
|
|
"redacts": event.redacts,
|
|
|
|
"received_ts": self._clock.time_msec(),
|
|
|
|
},
|
2015-03-20 09:52:56 -04:00
|
|
|
)
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def insert_labels_for_event_txn(
|
|
|
|
self, txn, event_id, labels, room_id, topological_ordering
|
|
|
|
):
|
|
|
|
"""Store the mapping between an event's ID and its labels, with one row per
|
|
|
|
(event_id, label) tuple.
|
2019-08-29 12:38:51 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
Args:
|
|
|
|
txn (LoggingTransaction): The transaction to execute.
|
|
|
|
event_id (str): The event's ID.
|
|
|
|
labels (list[str]): A list of text labels.
|
|
|
|
room_id (str): The ID of the room the event was sent to.
|
|
|
|
topological_ordering (int): The position of the event in the room's topology.
|
2019-08-29 12:38:51 -04:00
|
|
|
"""
|
2020-08-05 16:38:57 -04:00
|
|
|
return self.db_pool.simple_insert_many_txn(
|
2020-05-13 08:38:22 -04:00
|
|
|
txn=txn,
|
|
|
|
table="event_labels",
|
|
|
|
values=[
|
|
|
|
{
|
|
|
|
"event_id": event_id,
|
|
|
|
"label": label,
|
|
|
|
"room_id": room_id,
|
|
|
|
"topological_ordering": topological_ordering,
|
|
|
|
}
|
|
|
|
for label in labels
|
|
|
|
],
|
|
|
|
)
|
2019-08-29 12:38:51 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _insert_event_expiry_txn(self, txn, event_id, expiry_ts):
|
|
|
|
"""Save the expiry timestamp associated with a given event ID.
|
2019-08-29 12:38:51 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
Args:
|
|
|
|
txn (LoggingTransaction): The database transaction to use.
|
|
|
|
event_id (str): The event ID the expiry timestamp is associated with.
|
|
|
|
expiry_ts (int): The timestamp at which to expire (delete) the event.
|
2019-08-29 12:38:51 -04:00
|
|
|
"""
|
2020-08-05 16:38:57 -04:00
|
|
|
return self.db_pool.simple_insert_txn(
|
2020-05-13 08:38:22 -04:00
|
|
|
txn=txn,
|
|
|
|
table="event_expiry",
|
|
|
|
values={"event_id": event_id, "expiry_ts": expiry_ts},
|
2019-12-04 08:52:46 -05:00
|
|
|
)
|
2019-08-29 12:38:51 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _store_event_reference_hashes_txn(self, txn, events):
|
|
|
|
"""Store a hash for a PDU
|
|
|
|
Args:
|
|
|
|
txn (cursor):
|
|
|
|
events (list): list of Events.
|
|
|
|
"""
|
2019-08-29 12:38:51 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
vals = []
|
|
|
|
for event in events:
|
|
|
|
ref_alg, ref_hash_bytes = compute_event_reference_hash(event)
|
|
|
|
vals.append(
|
|
|
|
{
|
|
|
|
"event_id": event.event_id,
|
|
|
|
"algorithm": ref_alg,
|
|
|
|
"hash": memoryview(ref_hash_bytes),
|
|
|
|
}
|
2019-08-29 12:38:51 -04:00
|
|
|
)
|
|
|
|
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_insert_many_txn(
|
|
|
|
txn, table="event_reference_hashes", values=vals
|
|
|
|
)
|
2019-08-29 12:38:51 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _store_room_members_txn(self, txn, events, backfilled):
|
2021-02-16 17:32:34 -05:00
|
|
|
"""Store a room member in the database."""
|
2020-09-23 11:42:14 -04:00
|
|
|
|
|
|
|
def str_or_none(val: Any) -> Optional[str]:
|
|
|
|
return val if isinstance(val, str) else None
|
|
|
|
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_insert_many_txn(
|
2019-12-03 14:19:45 -05:00
|
|
|
txn,
|
2020-05-13 08:38:22 -04:00
|
|
|
table="room_memberships",
|
|
|
|
values=[
|
|
|
|
{
|
|
|
|
"event_id": event.event_id,
|
|
|
|
"user_id": event.state_key,
|
|
|
|
"sender": event.user_id,
|
|
|
|
"room_id": event.room_id,
|
|
|
|
"membership": event.membership,
|
2020-09-23 11:42:14 -04:00
|
|
|
"display_name": str_or_none(event.content.get("displayname")),
|
|
|
|
"avatar_url": str_or_none(event.content.get("avatar_url")),
|
2020-05-13 08:38:22 -04:00
|
|
|
}
|
|
|
|
for event in events
|
|
|
|
],
|
2019-12-03 14:19:45 -05:00
|
|
|
)
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
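# Bump the membership stream cache and invalidate the invited-rooms cache
# for each affected user; these run via txn.call_after once the
# transaction has committed.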
for event in events:
|
|
|
|
txn.call_after(
|
|
|
|
self.store._membership_stream_cache.entity_has_changed,
|
|
|
|
event.state_key,
|
|
|
|
event.internal_metadata.stream_ordering,
|
2016-03-01 09:49:41 -05:00
|
|
|
)
|
2020-05-13 08:38:22 -04:00
|
|
|
txn.call_after(
|
|
|
|
self.store.get_invited_rooms_for_local_user.invalidate,
|
|
|
|
(event.state_key,),
|
2016-03-01 09:49:41 -05:00
|
|
|
)
|
2016-03-30 12:19:56 -04:00
|
|
|
|
2020-07-07 09:20:40 -04:00
|
|
|
# We update the local_current_membership table only if the event is
|
|
|
|
# "current", i.e., its something that has just happened.
|
|
|
|
#
|
|
|
|
# This will usually get updated by the `current_state_events` handling,
|
|
|
|
# unless it's an outlier, and an outlier is only "current" if it's an "out of
|
|
|
|
# band membership", like a remote invite or a rejection of a remote invite.
|
|
|
|
if (
|
|
|
|
self.is_mine_id(event.state_key)
|
|
|
|
and not backfilled
|
|
|
|
and event.internal_metadata.is_outlier()
|
|
|
|
and event.internal_metadata.is_out_of_band_membership()
|
|
|
|
):
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_upsert_txn(
|
2020-07-07 09:20:40 -04:00
|
|
|
txn,
|
|
|
|
table="local_current_membership",
|
|
|
|
keyvalues={"room_id": event.room_id, "user_id": event.state_key},
|
|
|
|
values={
|
|
|
|
"event_id": event.event_id,
|
|
|
|
"membership": event.membership,
|
|
|
|
},
|
|
|
|
)
|
2016-07-05 05:28:51 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _handle_event_relations(self, txn, event):
|
|
|
|
"""Handles inserting relation data during peristence of events
|
2019-10-30 11:12:49 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
Args:
|
|
|
|
txn
|
|
|
|
event (EventBase)
|
2016-07-04 11:02:50 -04:00
|
|
|
"""
|
2020-05-13 08:38:22 -04:00
|
|
|
relation = event.content.get("m.relates_to")
|
|
|
|
if not relation:
|
|
|
|
# No relations
|
|
|
|
return
|
2016-07-04 11:02:50 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
rel_type = relation.get("rel_type")
|
|
|
|
if rel_type not in (
|
|
|
|
RelationTypes.ANNOTATION,
|
|
|
|
RelationTypes.REFERENCE,
|
|
|
|
RelationTypes.REPLACE,
|
|
|
|
):
|
|
|
|
# Unknown relation type
|
|
|
|
return
|
2016-07-05 05:28:51 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
parent_id = relation.get("event_id")
|
|
|
|
if not parent_id:
|
|
|
|
# Invalid relation
|
|
|
|
return
|
2018-02-14 11:41:12 -05:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
aggregation_key = relation.get("key")
|
2018-02-14 11:41:12 -05:00
|
|
|
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_insert_txn(
|
2020-05-13 08:38:22 -04:00
|
|
|
txn,
|
|
|
|
table="event_relations",
|
|
|
|
values={
|
|
|
|
"event_id": event.event_id,
|
|
|
|
"relates_to_id": parent_id,
|
|
|
|
"relation_type": rel_type,
|
|
|
|
"aggregation_key": aggregation_key,
|
|
|
|
},
|
2018-02-14 11:41:12 -05:00
|
|
|
)
|
|
|
|
|
2021-05-27 05:33:56 -04:00
|
|
|
txn.call_after(self.store.get_relations_for_event.invalidate, (parent_id,))
|
2020-05-13 08:38:22 -04:00
|
|
|
txn.call_after(
|
2021-05-27 05:33:56 -04:00
|
|
|
self.store.get_aggregation_groups_for_event.invalidate, (parent_id,)
|
2016-07-04 11:02:50 -04:00
|
|
|
)
|
2018-02-14 06:02:22 -05:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
if rel_type == RelationTypes.REPLACE:
|
|
|
|
txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,))
|
2018-02-14 06:02:22 -05:00
|
|
|
|
2021-07-28 11:46:37 -04:00
|
|
|
def _handle_insertion_event(self, txn: LoggingTransaction, event: EventBase):
|
|
|
|
"""Handles keeping track of insertion events and edges/connections.
|
|
|
|
Part of MSC2716.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
txn: The database transaction object
|
|
|
|
event: The event to process
|
|
|
|
"""
|
|
|
|
|
|
|
|
if event.type != EventTypes.MSC2716_INSERTION:
|
|
|
|
# Not an insertion event
|
|
|
|
return
|
|
|
|
|
|
|
|
# Skip processing an insertion event if the room version doesn't
|
|
|
|
# support it.
|
|
|
|
room_version = self.store.get_room_version_txn(txn, event.room_id)
|
|
|
|
if not room_version.msc2716_historical:
|
|
|
|
return
|
|
|
|
|
|
|
|
next_chunk_id = event.content.get(EventContentFields.MSC2716_NEXT_CHUNK_ID)
|
|
|
|
if next_chunk_id is None:
|
|
|
|
# Invalid insertion event without next chunk ID
|
|
|
|
return
|
|
|
|
|
|
|
|
logger.debug(
|
|
|
|
"_handle_insertion_event (next_chunk_id=%s) %s", next_chunk_id, event
|
|
|
|
)
|
|
|
|
|
|
|
|
# Keep track of the insertion event and the chunk ID
|
|
|
|
self.db_pool.simple_insert_txn(
|
|
|
|
txn,
|
|
|
|
table="insertion_events",
|
|
|
|
values={
|
|
|
|
"event_id": event.event_id,
|
|
|
|
"room_id": event.room_id,
|
|
|
|
"next_chunk_id": next_chunk_id,
|
|
|
|
},
|
|
|
|
)
|
|
|
|
|
|
|
|
# Insert an edge for every prev_event connection
|
|
|
|
for prev_event_id in event.prev_events:
|
|
|
|
self.db_pool.simple_insert_txn(
|
|
|
|
txn,
|
|
|
|
table="insertion_event_edges",
|
|
|
|
values={
|
|
|
|
"event_id": event.event_id,
|
|
|
|
"room_id": event.room_id,
|
|
|
|
"insertion_prev_event_id": prev_event_id,
|
|
|
|
},
|
|
|
|
)
|
|
|
|
|
|
|
|
def _handle_chunk_event(self, txn: LoggingTransaction, event: EventBase):
|
|
|
|
"""Handles inserting the chunk edges/connections between the chunk event
|
|
|
|
and an insertion event. Part of MSC2716.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
txn: The database transaction object
|
|
|
|
event: The event to process
|
|
|
|
"""
|
|
|
|
|
|
|
|
if event.type != EventTypes.MSC2716_CHUNK:
|
|
|
|
# Not a chunk event
|
|
|
|
return
|
|
|
|
|
|
|
|
# Skip processing a chunk event if the room version doesn't
|
|
|
|
# support it.
|
|
|
|
room_version = self.store.get_room_version_txn(txn, event.room_id)
|
|
|
|
if not room_version.msc2716_historical:
|
|
|
|
return
|
|
|
|
|
|
|
|
chunk_id = event.content.get(EventContentFields.MSC2716_CHUNK_ID)
|
|
|
|
if chunk_id is None:
|
|
|
|
# Invalid chunk event without a chunk ID
|
|
|
|
return
|
|
|
|
|
|
|
|
logger.debug("_handle_chunk_event chunk_id=%s %s", chunk_id, event)
|
|
|
|
|
|
|
|
# Keep track of the chunk event and the chunk ID it belongs to
|
|
|
|
self.db_pool.simple_insert_txn(
|
|
|
|
txn,
|
|
|
|
table="chunk_events",
|
|
|
|
values={
|
|
|
|
"event_id": event.event_id,
|
|
|
|
"room_id": event.room_id,
|
|
|
|
"chunk_id": chunk_id,
|
|
|
|
},
|
|
|
|
)
|
|
|
|
|
2021-08-04 13:07:57 -04:00
|
|
|
# When we receive an event with a `chunk_id` referencing the
|
|
|
|
# `next_chunk_id` of the insertion event, we can remove it from the
|
|
|
|
# `insertion_event_extremities` table.
|
|
|
|
sql = """
|
|
|
|
DELETE FROM insertion_event_extremities WHERE event_id IN (
|
|
|
|
SELECT event_id FROM insertion_events
|
|
|
|
WHERE next_chunk_id = ?
|
|
|
|
)
|
|
|
|
"""
|
|
|
|
|
|
|
|
txn.execute(sql, (chunk_id,))
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _handle_redaction(self, txn, redacted_event_id):
|
|
|
|
"""Handles receiving a redaction and checking whether we need to remove
|
|
|
|
any redacted relations from the database.
|
2018-09-13 10:05:52 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
Args:
|
|
|
|
txn
|
|
|
|
redacted_event_id (str): The event that was redacted.
|
|
|
|
"""
|
2018-09-13 10:05:52 -04:00
|
|
|
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_delete_txn(
|
2020-05-13 08:38:22 -04:00
|
|
|
txn, table="event_relations", keyvalues={"event_id": redacted_event_id}
|
2018-09-13 10:05:52 -04:00
|
|
|
)
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _store_room_topic_txn(self, txn, event):
|
|
|
|
if hasattr(event, "content") and "topic" in event.content:
|
|
|
|
self.store_event_search_txn(
|
|
|
|
txn, event, "content.topic", event.content["topic"]
|
|
|
|
)
|
2016-09-02 05:41:38 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _store_room_name_txn(self, txn, event):
|
|
|
|
if hasattr(event, "content") and "name" in event.content:
|
|
|
|
self.store_event_search_txn(
|
|
|
|
txn, event, "content.name", event.content["name"]
|
|
|
|
)
|
2017-05-10 12:46:41 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _store_room_message_txn(self, txn, event):
|
|
|
|
if hasattr(event, "content") and "body" in event.content:
|
|
|
|
self.store_event_search_txn(
|
|
|
|
txn, event, "content.body", event.content["body"]
|
|
|
|
)
|
2016-07-04 11:02:50 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _store_retention_policy_for_room_txn(self, txn, event):
|
2020-10-14 07:00:52 -04:00
|
|
|
if not event.is_state():
|
|
|
|
logger.debug("Ignoring non-state m.room.retention event")
|
|
|
|
return
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
if hasattr(event, "content") and (
|
|
|
|
"min_lifetime" in event.content or "max_lifetime" in event.content
|
|
|
|
):
|
|
|
|
if (
|
|
|
|
"min_lifetime" in event.content
|
2020-06-16 08:51:47 -04:00
|
|
|
and not isinstance(event.content.get("min_lifetime"), int)
|
2020-05-13 08:38:22 -04:00
|
|
|
) or (
|
|
|
|
"max_lifetime" in event.content
|
2020-06-16 08:51:47 -04:00
|
|
|
and not isinstance(event.content.get("max_lifetime"), int)
|
2020-05-13 08:38:22 -04:00
|
|
|
):
|
|
|
|
# Ignore the event if one of the values isn't an integer.
|
|
|
|
return
|
2017-05-10 12:46:41 -04:00
|
|
|
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_insert_txn(
|
2020-05-13 08:38:22 -04:00
|
|
|
txn=txn,
|
|
|
|
table="room_retention",
|
|
|
|
values={
|
|
|
|
"room_id": event.room_id,
|
|
|
|
"event_id": event.event_id,
|
|
|
|
"min_lifetime": event.content.get("min_lifetime"),
|
|
|
|
"max_lifetime": event.content.get("max_lifetime"),
|
|
|
|
},
|
|
|
|
)
|
2016-07-15 09:23:15 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
self.store._invalidate_cache_and_stream(
|
|
|
|
txn, self.store.get_retention_policy_for_room, (event.room_id,)
|
|
|
|
)
|
2016-07-15 09:23:15 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def store_event_search_txn(self, txn, event, key, value):
|
|
|
|
"""Add event to the search table
|
2017-05-10 12:46:41 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
Args:
|
|
|
|
txn (cursor):
|
|
|
|
event (EventBase):
|
|
|
|
key (str):
|
|
|
|
value (str):
|
2019-03-28 09:37:16 -04:00
|
|
|
"""
|
2020-05-13 08:38:22 -04:00
|
|
|
self.store.store_search_entries_txn(
|
|
|
|
txn,
|
|
|
|
(
|
|
|
|
SearchEntry(
|
|
|
|
key=key,
|
|
|
|
value=value,
|
|
|
|
event_id=event.event_id,
|
|
|
|
room_id=event.room_id,
|
|
|
|
stream_ordering=event.internal_metadata.stream_ordering,
|
|
|
|
origin_server_ts=event.origin_server_ts,
|
|
|
|
),
|
|
|
|
),
|
2019-03-28 09:37:16 -04:00
|
|
|
)
|
2016-09-05 09:49:08 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _set_push_actions_for_event_and_users_txn(
|
|
|
|
self, txn, events_and_contexts, all_events_and_contexts
|
|
|
|
):
|
|
|
|
"""Handles moving push actions from staging table to main
|
|
|
|
event_push_actions table for all events in `events_and_contexts`.
|
2018-10-04 10:18:52 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
Also ensures that all events in `all_events_and_contexts` are removed
|
|
|
|
from the push action staging area.
|
2016-07-04 11:02:50 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
Args:
|
|
|
|
events_and_contexts (list[(EventBase, EventContext)]): events
|
|
|
|
we are persisting
|
|
|
|
all_events_and_contexts (list[(EventBase, EventContext)]): all
|
|
|
|
events that we were going to persist. This includes events
|
|
|
|
we've already persisted, etc, that wouldn't appear in
|
|
|
|
events_and_contexts.
|
|
|
|
"""
|
2017-05-10 12:46:41 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
sql = """
|
|
|
|
INSERT INTO event_push_actions (
|
|
|
|
room_id, event_id, user_id, actions, stream_ordering,
|
2020-09-02 12:19:37 -04:00
|
|
|
topological_ordering, notif, highlight, unread
|
2016-07-04 11:02:50 -04:00
|
|
|
)
|
2020-09-02 12:19:37 -04:00
|
|
|
SELECT ?, event_id, user_id, actions, ?, ?, notif, highlight, unread
|
2020-05-13 08:38:22 -04:00
|
|
|
FROM event_push_actions_staging
|
|
|
|
WHERE event_id = ?
|
|
|
|
"""
|
2016-07-04 11:02:50 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
if events_and_contexts:
|
2021-01-21 05:22:53 -05:00
|
|
|
txn.execute_batch(
|
2020-05-13 08:38:22 -04:00
|
|
|
sql,
|
|
|
|
(
|
|
|
|
(
|
|
|
|
event.room_id,
|
|
|
|
event.internal_metadata.stream_ordering,
|
|
|
|
event.depth,
|
|
|
|
event.event_id,
|
|
|
|
)
|
|
|
|
for event, _ in events_and_contexts
|
|
|
|
),
|
|
|
|
)
|
2018-02-14 10:44:51 -05:00
|
|
|
|
2021-07-15 12:46:54 -04:00
|
|
|
room_to_event_ids: Dict[str, List[str]] = {}
|
2021-04-16 09:44:55 -04:00
|
|
|
for e, _ in events_and_contexts:
|
|
|
|
room_to_event_ids.setdefault(e.room_id, []).append(e.event_id)
|
2018-02-14 10:44:51 -05:00
|
|
|
|
2021-04-16 09:44:55 -04:00
|
|
|
for room_id, event_ids in room_to_event_ids.items():
|
|
|
|
rows = self.db_pool.simple_select_many_txn(
|
|
|
|
txn,
|
|
|
|
table="event_push_actions_staging",
|
|
|
|
column="event_id",
|
|
|
|
iterable=event_ids,
|
|
|
|
keyvalues={},
|
|
|
|
retcols=("user_id",),
|
2020-05-13 08:38:22 -04:00
|
|
|
)
|
|
|
|
|
2021-04-16 09:44:55 -04:00
|
|
|
user_ids = {row["user_id"] for row in rows}
|
|
|
|
|
|
|
|
for user_id in user_ids:
|
|
|
|
txn.call_after(
|
2021-05-27 05:33:56 -04:00
|
|
|
self.store.get_unread_event_push_actions_by_room_for_user.invalidate,
|
2021-04-16 09:44:55 -04:00
|
|
|
(room_id, user_id),
|
|
|
|
)
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
# Now we delete the staging area for *all* events that were being
|
|
|
|
# persisted.
|
2021-01-21 05:22:53 -05:00
|
|
|
txn.execute_batch(
|
2020-05-13 08:38:22 -04:00
|
|
|
"DELETE FROM event_push_actions_staging WHERE event_id = ?",
|
|
|
|
((event.event_id,) for event, _ in all_events_and_contexts),
|
2018-02-14 06:02:22 -05:00
|
|
|
)
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id):
|
|
|
|
# Sad that we have to blow away the cache for the whole room here
|
|
|
|
txn.call_after(
|
2021-05-27 05:33:56 -04:00
|
|
|
self.store.get_unread_event_push_actions_by_room_for_user.invalidate,
|
2019-03-28 09:37:16 -04:00
|
|
|
(room_id,),
|
|
|
|
)
|
2018-02-09 07:13:34 -05:00
|
|
|
txn.execute(
|
2020-05-13 08:38:22 -04:00
|
|
|
"DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?",
|
|
|
|
(room_id, event_id),
|
2018-02-09 07:13:34 -05:00
|
|
|
)
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _store_rejections_txn(self, txn, event_id, reason):
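"""Record in the rejections table that the given event was rejected, along
with the rejection reason and the time of this check."""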
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_insert_txn(
|
2020-05-13 08:38:22 -04:00
|
|
|
txn,
|
|
|
|
table="rejections",
|
|
|
|
values={
|
|
|
|
"event_id": event_id,
|
|
|
|
"reason": reason,
|
|
|
|
"last_check": self._clock.time_msec(),
|
|
|
|
},
|
|
|
|
)
|
2019-08-22 05:42:59 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _store_event_state_mappings_txn(
|
|
|
|
self, txn, events_and_contexts: Iterable[Tuple[EventBase, EventContext]]
|
|
|
|
):
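"""Record which state group each non-outlier event belongs to in the
event_to_state_groups table, and prefill the corresponding cache.

Rejected events are mapped to the state group before the event, since
they do not change the room state.
"""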
|
|
|
|
state_groups = {}
|
|
|
|
for event, context in events_and_contexts:
|
|
|
|
if event.internal_metadata.is_outlier():
|
|
|
|
continue
|
2019-11-04 08:36:57 -05:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
# if the event was rejected, just give it the same state as its
|
|
|
|
# predecessor.
|
|
|
|
if context.rejected:
|
|
|
|
state_groups[event.event_id] = context.state_group_before_event
|
|
|
|
continue
|
2019-08-22 05:42:59 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
state_groups[event.event_id] = context.state_group
|
2019-08-22 05:42:59 -04:00
|
|
|
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_insert_many_txn(
|
2020-05-13 08:38:22 -04:00
|
|
|
txn,
|
|
|
|
table="event_to_state_groups",
|
|
|
|
values=[
|
|
|
|
{"state_group": state_group_id, "event_id": event_id}
|
2020-06-15 07:03:36 -04:00
|
|
|
for event_id, state_group_id in state_groups.items()
|
2020-05-13 08:38:22 -04:00
|
|
|
],
|
2019-08-22 05:42:59 -04:00
|
|
|
)
|
|
|
|
|
2020-06-15 07:03:36 -04:00
|
|
|
for event_id, state_group_id in state_groups.items():
|
2020-05-13 08:38:22 -04:00
|
|
|
txn.call_after(
|
|
|
|
self.store._get_state_group_for_event.prefill,
|
|
|
|
(event_id,),
|
|
|
|
state_group_id,
|
2019-08-22 05:42:59 -04:00
|
|
|
)
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _update_min_depth_for_room_txn(self, txn, room_id, depth):
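"""Lower the stored min_depth for the room if the given depth is smaller
than the current value (or if no value has been stored yet)."""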
|
|
|
|
min_depth = self.store._get_min_depth_interaction(txn, room_id)
|
2019-08-22 05:42:59 -04:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
if min_depth is not None and depth >= min_depth:
|
|
|
|
return
|
2017-04-11 12:34:09 -04:00
|
|
|
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_upsert_txn(
|
2020-05-13 08:38:22 -04:00
|
|
|
txn,
|
|
|
|
table="room_depth",
|
|
|
|
keyvalues={"room_id": room_id},
|
|
|
|
values={"min_depth": depth},
|
2017-04-11 12:34:09 -04:00
|
|
|
)
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _handle_mult_prev_events(self, txn, events):
|
2019-11-01 06:30:51 -04:00
|
|
|
"""
|
2020-05-13 08:38:22 -04:00
|
|
|
For the given events, update the event edges table and the forward and
|
|
|
|
backward extremities tables.
|
|
|
|
"""
|
2020-08-05 16:38:57 -04:00
|
|
|
self.db_pool.simple_insert_many_txn(
|
2020-05-13 08:38:22 -04:00
|
|
|
txn,
|
|
|
|
table="event_edges",
|
2019-11-01 07:47:28 -04:00
|
|
|
values=[
|
|
|
|
{
|
2020-05-13 08:38:22 -04:00
|
|
|
"event_id": ev.event_id,
|
|
|
|
"prev_event_id": e_id,
|
|
|
|
"room_id": ev.room_id,
|
|
|
|
"is_state": False,
|
2019-11-01 07:47:28 -04:00
|
|
|
}
|
2020-05-13 08:38:22 -04:00
|
|
|
for ev in events
|
|
|
|
for e_id in ev.prev_event_ids()
|
2019-11-01 07:47:28 -04:00
|
|
|
],
|
2019-10-29 14:35:49 -04:00
|
|
|
)
|
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
self._update_backward_extremeties(txn, events)
|
2019-12-03 14:19:45 -05:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
def _update_backward_extremeties(self, txn, events):
|
|
|
|
"""Updates the event_backward_extremities tables based on the new/updated
|
|
|
|
events being persisted.
|
2019-12-03 14:19:45 -05:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
This is called for new events *and* for events that were outliers, but
|
|
|
|
are now being persisted as non-outliers.
|
2019-12-03 14:19:45 -05:00
|
|
|
|
2020-05-13 08:38:22 -04:00
|
|
|
Forward extremities are handled when we first start persisting the events.
|
2019-12-03 14:19:45 -05:00
|
|
|
"""
|
2021-08-04 13:07:57 -04:00
|
|
|
# From the events passed in, add all of the prev events as backwards extremities.
|
|
|
|
# Ignore any events that are already backwards extremities or outliers.
|
2020-05-13 08:38:22 -04:00
|
|
|
query = (
|
|
|
|
"INSERT INTO event_backward_extremities (event_id, room_id)"
|
|
|
|
" SELECT ?, ? WHERE NOT EXISTS ("
|
" SELECT 1 FROM event_backward_extremities"
|
|
|
|
" WHERE event_id = ? AND room_id = ?"
|
2020-05-13 08:38:22 -04:00
|
|
|
" )"
|
|
|
|
" AND NOT EXISTS ("
|
" SELECT 1 FROM events WHERE event_id = ? AND room_id = ? "
|
|
|
|
" AND outlier = ?"
|
2020-05-13 08:38:22 -04:00
|
|
|
" )"
|
2019-12-03 14:19:45 -05:00
|
|
|
)
|
|
|
|
|
2021-01-21 05:22:53 -05:00
|
|
|
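        # Each tuple below supplies the seven placeholders in order: the
        # (event_id, room_id) pair being inserted, the same pair for the
        # event_backward_extremities existence check, and the same pair plus
        # outlier = False for the events existence check.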
        txn.execute_batch(
            query,
            [
                (e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id, False)
                for ev in events
                for e_id in ev.prev_event_ids()
                if not ev.internal_metadata.is_outlier()
            ],
        )

        # Delete all these events that we've already fetched and now know that their
        # prev events are the new backwards extremities.
        query = (
            "DELETE FROM event_backward_extremities"
            " WHERE event_id = ? AND room_id = ?"
        )
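        # Every non-outlier event being persisted is now known locally, so it can
        # no longer be a backwards extremity itself.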
        txn.execute_batch(
            query,
            [
                (ev.event_id, ev.room_id)
                for ev in events
                if not ev.internal_metadata.is_outlier()
            ],
        )


@attr.s(slots=True)
class _LinkMap:
    """A helper type for tracking links between chains."""

    # Stores the set of links as nested maps: source chain ID -> target chain ID
    # -> source sequence number -> target sequence number.
    maps = attr.ib(type=Dict[int, Dict[int, Dict[int, int]]], factory=dict)
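    # For example, maps == {1: {2: {3: 2}}} records a single link from
    # (chain 1, sequence 3) to (chain 2, sequence 2).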

    # Stores the links that have been added (with new set to true), as tuples of
    # `(source chain ID, source sequence no, target chain ID, target sequence no.)`
    additions = attr.ib(type=Set[Tuple[int, int, int, int]], factory=set)

    def add_link(
        self,
        src_tuple: Tuple[int, int],
        target_tuple: Tuple[int, int],
        new: bool = True,
    ) -> bool:
        """Add a new link between two chains, ensuring no redundant links are added.

        New links should be added in topological order.

        Args:
            src_tuple: The chain ID/sequence number of the source of the link.
            target_tuple: The chain ID/sequence number of the target of the link.
            new: Whether this is a "new" link, i.e. should it be returned
                by `get_additions`.

        Returns:
            True if a link was added, false if the given link was dropped as redundant
        """
        src_chain, src_seq = src_tuple
        target_chain, target_seq = target_tuple

        current_links = self.maps.setdefault(src_chain, {}).setdefault(target_chain, {})

        assert src_chain != target_chain

        if new:
            # Check if the new link is redundant
            for current_seq_src, current_seq_target in current_links.items():
                # If a link "crosses" another link then it's redundant. For example,
                # in the following, link 1 (L1) is redundant, as any event reachable
                # via L1 is *also* reachable via L2.
                #
                #   Chain A     Chain B
                #      |          |
                #   L1 |------    |
                #      |     |    |
                #   L2 |---- | -->|
                #      |     |    |
                #      |     |--->|
                #      |          |
                #      |          |
                #
                # So we only need to keep links which *do not* cross, i.e. links
                # that both start and end above or below an existing link.
                #
                # Note, since we add links in topological ordering we should never
                # see `src_seq` less than `current_seq_src`.

                if current_seq_src <= src_seq and target_seq <= current_seq_target:
                    # This new link is redundant, nothing to do.
                    return False

            self.additions.add((src_chain, src_seq, target_chain, target_seq))

        current_links[src_seq] = target_seq
        return True

    def get_links_from(
        self, src_tuple: Tuple[int, int]
    ) -> Generator[Tuple[int, int], None, None]:
        """Gets the chains reachable from the given chain/sequence number.

        Yields:
            The chain ID and sequence number the link points to.
        """
        src_chain, src_seq = src_tuple
        for target_id, sequence_numbers in self.maps.get(src_chain, {}).items():
            for link_src_seq, target_seq in sequence_numbers.items():
                if link_src_seq <= src_seq:
                    yield target_id, target_seq

    def get_links_between(
        self, source_chain: int, target_chain: int
    ) -> Generator[Tuple[int, int], None, None]:
        """Gets the links between two chains.

        Yields:
            The source and target sequence numbers.
        """

        yield from self.maps.get(source_chain, {}).get(target_chain, {}).items()

    def get_additions(self) -> Generator[Tuple[int, int, int, int], None, None]:
        """Gets any newly added links.

        Yields:
            The source chain ID/sequence number and target chain ID/sequence number
        """

        for src_chain, src_seq, target_chain, _ in self.additions:
            target_seq = self.maps.get(src_chain, {}).get(target_chain, {}).get(src_seq)
            if target_seq is not None:
                yield (src_chain, src_seq, target_chain, target_seq)

    def exists_path_from(
        self,
        src_tuple: Tuple[int, int],
        target_tuple: Tuple[int, int],
    ) -> bool:
        """Checks if there is a path between the source chain ID/sequence and
        target chain ID/sequence.
        """
        src_chain, src_seq = src_tuple
        target_chain, target_seq = target_tuple

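        # Within a single chain, a position can reach anything at or before it
        # (target_seq <= src_seq). Across chains, a recorded link from
        # (src_chain, ls) to (target_chain, lt) gives a path whenever we start at
        # or after ls and target at or before lt.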
        if src_chain == target_chain:
            return target_seq <= src_seq

        links = self.get_links_between(src_chain, target_chain)
        for link_start_seq, link_end_seq in links:
            if link_start_seq <= src_seq and target_seq <= link_end_seq:
                return True

        return False