# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import threading
from collections import namedtuple
from typing import (
    Collection,
    Container,
    Dict,
    Iterable,
    List,
    Optional,
    Tuple,
    overload,
)

from constantly import NamedConstant, Names
from typing_extensions import Literal

from twisted.internet import defer

from synapse.api.constants import EventTypes
from synapse.api.errors import NotFoundError, SynapseError
from synapse.api.room_versions import (
    KNOWN_ROOM_VERSIONS,
    EventFormatVersions,
    RoomVersions,
)
from synapse.events import EventBase, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.events.utils import prune_event
from synapse.logging.context import PreserveLoggingContext, current_context
from synapse.metrics.background_process_metrics import (
    run_as_background_process,
    wrap_as_background_process,
)
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import BackfillStream
from synapse.replication.tcp.streams.events import EventsStream
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import DatabasePool
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import JsonDict, get_domain_from_id
from synapse.util.caches.descriptors import cached
from synapse.util.caches.lrucache import LruCache
from synapse.util.iterutils import batch_iter
from synapse.util.metrics import Measure

logger = logging.getLogger(__name__)

# These values are used in the `_enqueue_events` and `_do_fetch` methods to
# control how we batch/bulk fetch events from the database.
# The values are plucked out of thin air to make initial sync run faster
# on jki.re
# TODO: Make these configurable.
EVENT_QUEUE_THREADS = 3  # Max number of threads that will fetch events
EVENT_QUEUE_ITERATIONS = 3  # No. times we block waiting for requests for events
EVENT_QUEUE_TIMEOUT_S = 0.1  # Timeout when waiting for requests for events


_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))

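# The following is an editorial sketch (not part of the original module) of how
# the EVENT_QUEUE_* constants above drive the fetcher loop in `_do_fetch` below:
# a worker drains a shared request list under a condition variable; if the list
# is empty it waits EVENT_QUEUE_TIMEOUT_S seconds, giving up after more than
# EVENT_QUEUE_ITERATIONS empty polls, while at most EVENT_QUEUE_THREADS such
# workers run at once. The names `lock`, `queue` and `handle_batch` here are
# hypothetical.
def _example_fetch_loop(lock, queue, handle_batch):
    # lock is a threading.Condition guarding `queue` (a plain list of requests).
    empty_polls = 0
    while True:
        with lock:
            batch = list(queue)
            queue.clear()
            if not batch:
                if empty_polls > EVENT_QUEUE_ITERATIONS:
                    # Give up this worker; the producer side spawns a new one
                    # the next time a request arrives.
                    return
                lock.wait(EVENT_QUEUE_TIMEOUT_S)
                empty_polls += 1
                continue
            empty_polls = 0
        # Handle the batch outside the lock so producers aren't blocked.
        handle_batch(batch)
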
class EventRedactBehaviour(Names):
    """
    What to do when retrieving a redacted event from the database.
    """

    AS_IS = NamedConstant()
    REDACT = NamedConstant()
    BLOCK = NamedConstant()

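# Editorial usage sketch: callers pass one of these members to
# `EventsWorkerStore.get_event`/`get_events`, e.g.
# `store.get_event(event_id, redact_behaviour=EventRedactBehaviour.BLOCK)`
# to withhold redacted events entirely (see the docstrings below).
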
class EventsWorkerStore(SQLBaseStore):
    # Whether to use dedicated DB threads for event fetching. This is only used
    # if there are multiple DB threads available. When used will lock the DB
    # thread for periods of time (so unit tests want to disable this when they
    # run DB transactions on the main thread). See EVENT_QUEUE_* for more
    # options controlling this.
    USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = True

    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)

        if isinstance(database.engine, PostgresEngine):
            # If we're using Postgres then we can use `MultiWriterIdGenerator`
            # regardless of whether this process writes to the streams or not.
            self._stream_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                stream_name="events",
                instance_name=hs.get_instance_name(),
                tables=[("events", "instance_name", "stream_ordering")],
                sequence_name="events_stream_seq",
                writers=hs.config.worker.writers.events,
            )
            self._backfill_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                stream_name="backfill",
                instance_name=hs.get_instance_name(),
                tables=[("events", "instance_name", "stream_ordering")],
                sequence_name="events_backfill_stream_seq",
                positive=False,
                writers=hs.config.worker.writers.events,
            )
        else:
            # We shouldn't be running in worker mode with SQLite, but it's useful
            # to support it for unit tests.
            #
            # If this process is the writer then we need to use
            # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
            # updated over replication. (Multiple writers are not supported for
            # SQLite).
            if hs.get_instance_name() in hs.config.worker.writers.events:
                self._stream_id_gen = StreamIdGenerator(
                    db_conn,
                    "events",
                    "stream_ordering",
                )
                self._backfill_id_gen = StreamIdGenerator(
                    db_conn,
                    "events",
                    "stream_ordering",
                    step=-1,
                    extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
                )
            else:
                self._stream_id_gen = SlavedIdTracker(
                    db_conn, "events", "stream_ordering"
                )
                self._backfill_id_gen = SlavedIdTracker(
                    db_conn, "events", "stream_ordering", step=-1
                )

        if hs.config.run_background_tasks:
            # We periodically clean out old transaction ID mappings
            self._clock.looping_call(
                self._cleanup_old_transaction_ids,
                5 * 60 * 1000,
            )

        self._get_event_cache = LruCache(
            cache_name="*getEvent*",
            keylen=3,
            max_size=hs.config.caches.event_cache_size,
        )

        self._event_fetch_lock = threading.Condition()
        self._event_fetch_list = []
        self._event_fetch_ongoing = 0

        # We define this sequence here so that it can be referenced from both
        # the DataStore and PersistEventStore.
        def get_chain_id_txn(txn):
            txn.execute("SELECT COALESCE(max(chain_id), 0) FROM event_auth_chains")
            return txn.fetchone()[0]

        self.event_chain_id_gen = build_sequence_generator(
            db_conn,
            database.engine,
            get_chain_id_txn,
            "event_auth_chain_id",
            table="event_auth_chains",
            id_column="chain_id",
        )

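    # Editorial note (not in the original source): forward-persisted events take
    # positive, increasing stream orderings from `_stream_id_gen` (1, 2, 3, ...),
    # while backfilled events take negative ones from `_backfill_id_gen`
    # (-1, -2, -3, ...), hence the `positive=False` / `step=-1` above, so the
    # two streams can never hand out colliding IDs.
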
    def process_replication_rows(self, stream_name, instance_name, token, rows):
        if stream_name == EventsStream.NAME:
            self._stream_id_gen.advance(instance_name, token)
        elif stream_name == BackfillStream.NAME:
            self._backfill_id_gen.advance(instance_name, -token)

        super().process_replication_rows(stream_name, instance_name, token, rows)

    async def get_received_ts(self, event_id: str) -> Optional[int]:
        """Get received_ts (when it was persisted) for the event.

        Raises an exception for unknown events.

        Args:
            event_id: The event ID to query.

        Returns:
            Timestamp in milliseconds, or None for events that were persisted
            before received_ts was implemented.
        """
        return await self.db_pool.simple_select_one_onecol(
            table="events",
            keyvalues={"event_id": event_id},
            retcol="received_ts",
            desc="get_received_ts",
        )

    # Inform mypy that if allow_none is False (the default) then get_event
    # always returns an EventBase.
    @overload
    async def get_event(
        self,
        event_id: str,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
        allow_none: Literal[False] = False,
        check_room_id: Optional[str] = None,
    ) -> EventBase:
        ...

    @overload
    async def get_event(
        self,
        event_id: str,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
        allow_none: Literal[True] = False,
        check_room_id: Optional[str] = None,
    ) -> Optional[EventBase]:
        ...

    async def get_event(
        self,
        event_id: str,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
        allow_none: bool = False,
        check_room_id: Optional[str] = None,
    ) -> Optional[EventBase]:
        """Get an event from the database by event_id.

        Args:
            event_id: The event_id of the event to fetch

            redact_behaviour: Determine what to do with a redacted event. Possible values:
                * AS_IS - Return the full event body with no redacted content
                * REDACT - Return the event but with a redacted body
                * BLOCK - Do not return redacted events (behave as per allow_none
                    if the event is redacted)

            get_prev_content: If True and event is a state event,
                include the previous state's content in the unsigned field.

            allow_rejected: If True, return rejected events. Otherwise,
                behave as per allow_none.

            allow_none: If True, return None if no event found; if
                False, throw a NotFoundError.

            check_room_id: if not None, check the room of the found event.
                If there is a mismatch, behave as per allow_none.

        Returns:
            The event, or None if the event was not found.
        """
        if not isinstance(event_id, str):
            raise TypeError("Invalid event event_id %r" % (event_id,))

        events = await self.get_events_as_list(
            [event_id],
            redact_behaviour=redact_behaviour,
            get_prev_content=get_prev_content,
            allow_rejected=allow_rejected,
        )

        event = events[0] if events else None

        if event is not None and check_room_id is not None:
            if event.room_id != check_room_id:
                event = None

        if event is None and not allow_none:
            raise NotFoundError("Could not find event %s" % (event_id,))

        return event

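    # Illustrative usage sketch (hypothetical caller, not part of this module):
    #
    #     try:
    #         event = await store.get_event(event_id, check_room_id=room_id)
    #     except NotFoundError:
    #         ...  # unknown event, wrong room, or a withheld redacted event
    #
    # With allow_none=True the same cases yield None instead of raising.
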
    async def get_events(
        self,
        event_ids: Iterable[str],
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
    ) -> Dict[str, EventBase]:
        """Get events from the database

        Args:
            event_ids: The event_ids of the events to fetch

            redact_behaviour: Determine what to do with a redacted event. Possible
                values:
                * AS_IS - Return the full event body with no redacted content
                * REDACT - Return the event but with a redacted body
                * BLOCK - Do not return redacted events (omit them from the response)

            get_prev_content: If True and event is a state event,
                include the previous state's content in the unsigned field.

            allow_rejected: If True, return rejected events. Otherwise,
                omits rejected events from the response.

        Returns:
            A mapping from event_id to event.
        """
        events = await self.get_events_as_list(
            event_ids,
            redact_behaviour=redact_behaviour,
            get_prev_content=get_prev_content,
            allow_rejected=allow_rejected,
        )

        return {e.event_id: e for e in events}

    async def get_events_as_list(
        self,
        event_ids: Collection[str],
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
    ) -> List[EventBase]:
        """Get events from the database and return in a list in the same order
        as given by the `event_ids` arg.

        Unknown events will be omitted from the response.

        Args:
            event_ids: The event_ids of the events to fetch

            redact_behaviour: Determine what to do with a redacted event. Possible values:
                * AS_IS - Return the full event body with no redacted content
                * REDACT - Return the event but with a redacted body
                * BLOCK - Do not return redacted events (omit them from the response)

            get_prev_content: If True and event is a state event,
                include the previous state's content in the unsigned field.

            allow_rejected: If True, return rejected events. Otherwise,
                omits rejected events from the response.

        Returns:
            List of events fetched from the database. The events are in the same
            order as the `event_ids` arg.

            Note that the returned list may be smaller than the list of event
            IDs if not all events could be fetched.
        """

        if not event_ids:
            return []

        # there may be duplicates so we cast the list to a set
        event_entry_map = await self._get_events_from_cache_or_db(
            set(event_ids), allow_rejected=allow_rejected
        )

        events = []
        for event_id in event_ids:
            entry = event_entry_map.get(event_id, None)
            if not entry:
                continue

            if not allow_rejected:
                assert not entry.event.rejected_reason, (
                    "rejected event returned from _get_events_from_cache_or_db despite "
                    "allow_rejected=False"
                )

            # We may not have had the original event when we received a redaction, so
            # we have to recheck auth now.

            if not allow_rejected and entry.event.type == EventTypes.Redaction:
                if entry.event.redacts is None:
                    # A redacted redaction doesn't have a `redacts` key, in
                    # which case let's just withhold the event.
                    #
                    # Note: Most of the time if the redaction has been
                    # redacted we still have the un-redacted event in the DB
                    # and so we'll still see the `redacts` key. However, this
                    # isn't always true e.g. if we have censored the event.
                    logger.debug(
                        "Withholding redaction event %s as we don't have redacts key",
                        event_id,
                    )
                    continue

                redacted_event_id = entry.event.redacts
                event_map = await self._get_events_from_cache_or_db([redacted_event_id])
                original_event_entry = event_map.get(redacted_event_id)
                if not original_event_entry:
                    # we don't have the redacted event (or it was rejected).
                    #
                    # We assume that the redaction isn't authorized for now; if the
                    # redacted event later turns up, the redaction will be re-checked,
                    # and if it is found valid, the original will get redacted before it
                    # is served to the client.
                    logger.debug(
                        "Withholding redaction event %s since we don't (yet) have the "
                        "original %s",
                        event_id,
                        redacted_event_id,
                    )
                    continue

                original_event = original_event_entry.event
                if original_event.type == EventTypes.Create:
                    # we never serve redactions of Creates to clients.
                    logger.info(
                        "Withholding redaction %s of create event %s",
                        event_id,
                        redacted_event_id,
                    )
                    continue

                if original_event.room_id != entry.event.room_id:
                    logger.info(
                        "Withholding redaction %s of event %s from a different room",
                        event_id,
                        redacted_event_id,
                    )
                    continue

                if entry.event.internal_metadata.need_to_check_redaction():
                    original_domain = get_domain_from_id(original_event.sender)
                    redaction_domain = get_domain_from_id(entry.event.sender)
                    if original_domain != redaction_domain:
                        # the senders don't match, so this is forbidden
                        logger.info(
                            "Withholding redaction %s whose sender domain %s doesn't "
                            "match that of redacted event %s %s",
                            event_id,
                            redaction_domain,
                            redacted_event_id,
                            original_domain,
                        )
                        continue

                    # Update the cache to save doing the checks again.
                    entry.event.internal_metadata.recheck_redaction = False

            event = entry.event

            if entry.redacted_event:
                if redact_behaviour == EventRedactBehaviour.BLOCK:
                    # Skip this event
                    continue
                elif redact_behaviour == EventRedactBehaviour.REDACT:
                    event = entry.redacted_event

            events.append(event)

            if get_prev_content:
                if "replaces_state" in event.unsigned:
                    prev = await self.get_event(
                        event.unsigned["replaces_state"],
                        get_prev_content=False,
                        allow_none=True,
                    )
                    if prev:
                        event.unsigned = dict(event.unsigned)
                        event.unsigned["prev_content"] = prev.content
                        event.unsigned["prev_sender"] = prev.sender

        return events

    async def _get_events_from_cache_or_db(self, event_ids, allow_rejected=False):
        """Fetch a bunch of events from the cache or the database.

        If events are pulled from the database, they will be cached for future lookups.

        Unknown events are omitted from the response.

        Args:
            event_ids (Iterable[str]): The event_ids of the events to fetch

            allow_rejected (bool): Whether to include rejected events. If False,
                rejected events are omitted from the response.

        Returns:
            Dict[str, _EventCacheEntry]:
                map from event id to result
        """
        event_entry_map = self._get_events_from_cache(
            event_ids, allow_rejected=allow_rejected
        )

        missing_events_ids = [e for e in event_ids if e not in event_entry_map]

        if missing_events_ids:
            log_ctx = current_context()
            log_ctx.record_event_fetch(len(missing_events_ids))

            # Note that _get_events_from_db is also responsible for turning db rows
            # into FrozenEvents (via _get_event_from_row), which involves seeing if
            # the events have been redacted, and if so pulling the redaction event out
            # of the database to check it.
            missing_events = await self._get_events_from_db(
                missing_events_ids, allow_rejected=allow_rejected
            )

            event_entry_map.update(missing_events)

        return event_entry_map

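    # Editorial note: `_get_events_from_db` populates `_get_event_cache` for
    # every row it parses, so a second call here for the same IDs is served
    # entirely by `_get_events_from_cache` without touching the database.
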
    def _invalidate_get_event_cache(self, event_id):
        self._get_event_cache.invalidate((event_id,))

    def _get_events_from_cache(self, events, allow_rejected, update_metrics=True):
        """Fetch events from the caches

        Args:
            events (Iterable[str]): list of event_ids to fetch
            allow_rejected (bool): Whether to return events that were rejected
            update_metrics (bool): Whether to update the cache hit ratio metrics

        Returns:
            dict of event_id -> _EventCacheEntry for each event_id in cache. If
            allow_rejected is `False` then there will still be an entry but it
            will be `None`
        """
        event_map = {}

        for event_id in events:
            ret = self._get_event_cache.get(
                (event_id,), None, update_metrics=update_metrics
            )
            if not ret:
                continue

            if allow_rejected or not ret.event.rejected_reason:
                event_map[event_id] = ret
            else:
                event_map[event_id] = None

        return event_map

    async def get_stripped_room_state_from_event_context(
        self,
        context: EventContext,
        state_types_to_include: Container[str],
        membership_user_id: Optional[str] = None,
    ) -> List[JsonDict]:
        """
        Retrieve the stripped state from a room, given an event context to retrieve state
        from, as well as the state types to include. Optionally, include the membership
        events from a specific user.

        "Stripped" state means that only the `type`, `state_key`, `content` and `sender` keys
        are included from each state event.

        Args:
            context: The event context to retrieve state of the room from.
            state_types_to_include: The type of state events to include.
            membership_user_id: An optional user ID to include the stripped membership state
                events of. This is useful when generating the stripped state of a room for
                invites. We want to send membership events of the inviter, so that the
                invitee can display the inviter's profile information if the room lacks any.

        Returns:
            A list of dictionaries, each representing a stripped state event from the room.
        """
        current_state_ids = await context.get_current_state_ids()

        # We know this event is not an outlier, so this must be
        # non-None.
        assert current_state_ids is not None

        # The state to include
        state_to_include_ids = [
            e_id
            for k, e_id in current_state_ids.items()
            if k[0] in state_types_to_include
            or (membership_user_id and k == (EventTypes.Member, membership_user_id))
        ]

        state_to_include = await self.get_events(state_to_include_ids)

        return [
            {
                "type": e.type,
                "state_key": e.state_key,
                "content": e.content,
                "sender": e.sender,
            }
            for e in state_to_include.values()
        ]

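    # For example (illustrative values), a stripped m.room.name event in the
    # returned list looks like:
    #
    #     {
    #         "type": "m.room.name",
    #         "state_key": "",
    #         "content": {"name": "My Room"},
    #         "sender": "@alice:example.com",
    #     }
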
    def _do_fetch(self, conn):
        """Takes a database connection and waits for requests for events from
        the _event_fetch_list queue.
        """
        i = 0
        while True:
            with self._event_fetch_lock:
                event_list = self._event_fetch_list
                self._event_fetch_list = []

                if not event_list:
                    single_threaded = self.database_engine.single_threaded
                    if (
                        not self.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING
                        or single_threaded
                        or i > EVENT_QUEUE_ITERATIONS
                    ):
                        self._event_fetch_ongoing -= 1
                        return
                    else:
                        self._event_fetch_lock.wait(EVENT_QUEUE_TIMEOUT_S)
                        i += 1
                        continue
                i = 0

            self._fetch_event_list(conn, event_list)

    def _fetch_event_list(self, conn, event_list):
        """Handle a load of requests from the _event_fetch_list queue

        Args:
            conn (twisted.enterprise.adbapi.Connection): database connection

            event_list (list[Tuple[list[str], Deferred]]):
                The fetch requests. Each entry consists of a list of event
                ids to be fetched, and a deferred to be completed once the
                events have been fetched.

                The deferreds are callbacked with a dictionary mapping from event id
                to event row. Note that it may well contain additional events that
                were not part of this request.
        """
        with Measure(self._clock, "_fetch_event_list"):
            try:
                events_to_fetch = {
                    event_id for events, _ in event_list for event_id in events
                }

                row_dict = self.db_pool.new_transaction(
                    conn, "do_fetch", [], [], self._fetch_event_rows, events_to_fetch
                )

                # We only want to resolve deferreds from the main thread
                def fire():
                    for _, d in event_list:
                        d.callback(row_dict)

                with PreserveLoggingContext():
                    self.hs.get_reactor().callFromThread(fire)
            except Exception as e:
                logger.exception("do_fetch")

                # We only want to resolve deferreds from the main thread
                def fire(evs, exc):
                    for _, d in evs:
                        if not d.called:
                            with PreserveLoggingContext():
                                d.errback(exc)

                with PreserveLoggingContext():
                    self.hs.get_reactor().callFromThread(fire, event_list, e)

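    # Editorial note: `_fetch_event_list` runs on a dedicated DB thread, so it
    # must not resolve deferreds directly; both the success and error paths
    # hand the result back to the reactor thread via `callFromThread` before
    # firing.
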
    async def _get_events_from_db(self, event_ids, allow_rejected=False):
        """Fetch a bunch of events from the database.

        Returned events will be added to the cache for future lookups.

        Unknown events are omitted from the response.

        Args:
            event_ids (Iterable[str]): The event_ids of the events to fetch

            allow_rejected (bool): Whether to include rejected events. If False,
                rejected events are omitted from the response.

        Returns:
            Dict[str, _EventCacheEntry]:
                map from event id to result. May return extra events which
                weren't asked for.
        """
        fetched_events = {}
        events_to_fetch = event_ids

        while events_to_fetch:
            row_map = await self._enqueue_events(events_to_fetch)

            # we need to recursively fetch any redactions of those events
            redaction_ids = set()
            for event_id in events_to_fetch:
                row = row_map.get(event_id)
                fetched_events[event_id] = row
                if row:
                    redaction_ids.update(row["redactions"])

            events_to_fetch = redaction_ids.difference(fetched_events.keys())
            if events_to_fetch:
                logger.debug("Also fetching redaction events %s", events_to_fetch)

        # build a map from event_id to EventBase
        event_map = {}
        for event_id, row in fetched_events.items():
            if not row:
                continue
            assert row["event_id"] == event_id

            rejected_reason = row["rejected_reason"]

            if not allow_rejected and rejected_reason:
                continue

            # If the event or metadata cannot be parsed, log the error and act
            # as if the event is unknown.
            try:
                d = db_to_json(row["json"])
            except ValueError:
                logger.error("Unable to parse json from event: %s", event_id)
                continue
            try:
                internal_metadata = db_to_json(row["internal_metadata"])
            except ValueError:
                logger.error(
                    "Unable to parse internal_metadata from event: %s", event_id
                )
                continue

            format_version = row["format_version"]
            if format_version is None:
                # This means that we stored the event before we had the concept
                # of an event format version, so it must be a V1 event.
                format_version = EventFormatVersions.V1

            room_version_id = row["room_version_id"]

            if not room_version_id:
                # this should only happen for out-of-band membership events which
                # arrived before #6983 landed. For all other events, we should have
                # an entry in the 'rooms' table.
                #
                # However, the 'out_of_band_membership' flag is unreliable for older
                # invites, so just accept it for all membership events.
                if d["type"] != EventTypes.Member:
                    raise Exception(
                        "Room %s for event %s is unknown" % (d["room_id"], event_id)
                    )

                # so, assuming this is an out-of-band-invite that arrived before #6983
                # landed, we know that the room version must be v5 or earlier (because
                # v6 hadn't been invented at that point, so invites from such rooms
                # would have been rejected.)
                #
                # The main reason we need to know the room version here (other than
                # choosing the right python Event class) is in case the event later has
                # to be redacted - and all the room versions up to v5 used the same
                # redaction algorithm.
                #
                # So, the following approximations should be adequate.

                if format_version == EventFormatVersions.V1:
                    # if it's event format v1 then it must be room v1 or v2
                    room_version = RoomVersions.V1
                elif format_version == EventFormatVersions.V2:
                    # if it's event format v2 then it must be room v3
                    room_version = RoomVersions.V3
                else:
                    # if it's event format v3 then it must be room v4 or v5
                    room_version = RoomVersions.V5
            else:
                room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
                if not room_version:
                    logger.warning(
                        "Event %s in room %s has unknown room version %s",
                        event_id,
                        d["room_id"],
                        room_version_id,
                    )
                    continue

                if room_version.event_format != format_version:
                    logger.error(
                        "Event %s in room %s with version %s has wrong format: "
                        "expected %s, was %s",
                        event_id,
                        d["room_id"],
                        room_version_id,
                        room_version.event_format,
                        format_version,
                    )
                    continue

            original_ev = make_event_from_dict(
                event_dict=d,
                room_version=room_version,
                internal_metadata_dict=internal_metadata,
                rejected_reason=rejected_reason,
            )
            original_ev.internal_metadata.stream_ordering = row["stream_ordering"]
            original_ev.internal_metadata.outlier = row["outlier"]

            event_map[event_id] = original_ev

        # finally, we can decide whether each one needs redacting, and build
        # the cache entries.
        result_map = {}
        for event_id, original_ev in event_map.items():
            redactions = fetched_events[event_id]["redactions"]
            redacted_event = self._maybe_redact_event_row(
                original_ev, redactions, event_map
            )

            cache_entry = _EventCacheEntry(
                event=original_ev, redacted_event=redacted_event
            )

            self._get_event_cache.set((event_id,), cache_entry)
            result_map[event_id] = cache_entry

        return result_map

    async def _enqueue_events(self, events):
        """Fetches events from the database using the _event_fetch_list. This
        allows batch and bulk fetching of events - it allows us to fetch events
        without having to create a new transaction for each request for events.

        Args:
            events (Iterable[str]): events to be fetched.

        Returns:
            Dict[str, Dict]: map from event id to row data from the database.
                May contain events that weren't requested.
        """

        events_d = defer.Deferred()
        with self._event_fetch_lock:
            self._event_fetch_list.append((events, events_d))

            self._event_fetch_lock.notify()

            if self._event_fetch_ongoing < EVENT_QUEUE_THREADS:
                self._event_fetch_ongoing += 1
                should_start = True
            else:
                should_start = False

        if should_start:
            run_as_background_process(
                "fetch_events", self.db_pool.runWithConnection, self._do_fetch
            )

        logger.debug("Loading %d events: %s", len(events), events)
        with PreserveLoggingContext():
            row_map = await events_d
        logger.debug("Loaded %d events (%d rows)", len(events), len(row_map))

        return row_map

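    # Editorial note: because `_do_fetch` drains the whole request list into a
    # single transaction, the row map awaited above can include rows queued by
    # other concurrent callers, which is why callers filter the result rather
    # than assuming it matches their request exactly.
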
    def _fetch_event_rows(self, txn, event_ids):
        """Fetch event rows from the database

        Events which are not found are omitted from the result.

        The returned per-event dicts contain the following keys:

         * event_id (str)

         * stream_ordering (int): stream ordering for this event

         * json (str): json-encoded event structure

         * internal_metadata (str): json-encoded internal metadata dict

         * format_version (int|None): The format of the event. Hopefully one
           of EventFormatVersions. 'None' means the event predates
           EventFormatVersions (so the event is format V1).

         * room_version_id (str|None): The version of the room which contains the event.
           Hopefully one of RoomVersions.

           Due to historical reasons, there may be a few events in the database which
           do not have an associated room; in this case None will be returned here.

         * rejected_reason (str|None): if the event was rejected, the reason
           why.

         * redactions (List[str]): a list of event-ids which (claim to) redact
           this event.

        Args:
            txn (twisted.enterprise.adbapi.Connection):
            event_ids (Iterable[str]): event IDs to fetch

        Returns:
            Dict[str, Dict]: a map from event id to event info.
        """
        event_dict = {}
        for evs in batch_iter(event_ids, 200):
            sql = """\
                SELECT
                  e.event_id,
                  e.stream_ordering,
                  ej.internal_metadata,
                  ej.json,
                  ej.format_version,
                  r.room_version,
                  rej.reason,
                  e.outlier
                FROM events AS e
                  JOIN event_json AS ej USING (event_id)
                  LEFT JOIN rooms r ON r.room_id = e.room_id
                  LEFT JOIN rejections as rej USING (event_id)
                WHERE """

            clause, args = make_in_list_sql_clause(
                txn.database_engine, "e.event_id", evs
            )

            txn.execute(sql + clause, args)

            for row in txn:
                event_id = row[0]
                event_dict[event_id] = {
                    "event_id": event_id,
                    "stream_ordering": row[1],
                    "internal_metadata": row[2],
                    "json": row[3],
                    "format_version": row[4],
                    "room_version_id": row[5],
                    "rejected_reason": row[6],
                    "redactions": [],
                    "outlier": row[7],
                }

            # check for redactions
            redactions_sql = "SELECT event_id, redacts FROM redactions WHERE "

            clause, args = make_in_list_sql_clause(txn.database_engine, "redacts", evs)

            txn.execute(redactions_sql + clause, args)

            for (redacter, redacted) in txn:
                d = event_dict.get(redacted)
                if d:
                    d["redactions"].append(redacter)

        return event_dict

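    # Editorial aside: `batch_iter` (from synapse.util.iterutils) chunks an
    # iterable into tuples of at most N items, e.g. (illustrative):
    #
    #     >>> list(batch_iter(range(5), 2))
    #     [(0, 1), (2, 3), (4,)]
    #
    # which is what bounds each query in `_fetch_event_rows` to 200 event IDs.
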
    def _maybe_redact_event_row(
        self,
        original_ev: EventBase,
        redactions: Iterable[str],
        event_map: Dict[str, EventBase],
    ) -> Optional[EventBase]:
        """Given an event object and a list of possible redacting event ids,
        determine whether to honour any of those redactions and if so return a redacted
        event.

        Args:
            original_ev: The original event.
            redactions: list of event ids of potential redaction events
            event_map: other events which have been fetched, in which we can
                look up the redaction events. Map from event id to event.

        Returns:
            If the event should be redacted, a pruned event object. Otherwise, None.
        """
        if original_ev.type == "m.room.create":
            # we choose to ignore redactions of m.room.create events.
            return None

        for redaction_id in redactions:
            redaction_event = event_map.get(redaction_id)
            if not redaction_event or redaction_event.rejected_reason:
                # we don't have the redaction event, or the redaction event was not
                # authorized.
                logger.debug(
                    "%s was redacted by %s but redaction not found/authed",
                    original_ev.event_id,
                    redaction_id,
                )
                continue

            if redaction_event.room_id != original_ev.room_id:
                logger.debug(
                    "%s was redacted by %s but redaction was in a different room!",
                    original_ev.event_id,
                    redaction_id,
                )
                continue

            # Starting in room version v3, some redactions need to be
            # rechecked if we didn't have the redacted event at the
            # time, so we recheck on read instead.
            if redaction_event.internal_metadata.need_to_check_redaction():
                expected_domain = get_domain_from_id(original_ev.sender)
                if get_domain_from_id(redaction_event.sender) == expected_domain:
                    # This redaction event is allowed. Mark as not needing a recheck.
                    redaction_event.internal_metadata.recheck_redaction = False
                else:
                    # Senders don't match, so the event isn't actually redacted
                    logger.debug(
                        "%s was redacted by %s but the senders don't match",
                        original_ev.event_id,
                        redaction_id,
                    )
                    continue

            logger.debug("Redacting %s due to %s", original_ev.event_id, redaction_id)

            # we found a good redaction event. Redact!
            redacted_event = prune_event(original_ev)
            redacted_event.unsigned["redacted_by"] = redaction_id

            # It's fine to add the event directly, since get_pdu_json
            # will serialise this field correctly
            redacted_event.unsigned["redacted_because"] = redaction_event

            return redacted_event

        # no valid redaction found for this event
        return None

    async def have_events_in_timeline(self, event_ids):
        """Given a list of event ids, check if we have already processed and
        stored them as non outliers.
        """
        rows = await self.db_pool.simple_select_many_batch(
            table="events",
            retcols=("event_id",),
            column="event_id",
            iterable=list(event_ids),
            keyvalues={"outlier": False},
            desc="have_events_in_timeline",
        )

        return {r["event_id"] for r in rows}

async def have_seen_events(self, event_ids):
|
2018-07-26 08:31:59 -04:00
|
|
|
"""Given a list of event ids, check if we have already processed them.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
event_ids (iterable[str]):
|
|
|
|
|
|
|
|
Returns:
|
2020-08-18 16:20:49 -04:00
|
|
|
set[str]: The events we have already seen.
|
2018-07-26 08:31:59 -04:00
|
|
|
"""
|
Optimise missing prev_event handling (#9601)
Background: When we receive incoming federation traffic, and notice that we are missing prev_events from
the incoming traffic, first we do a `/get_missing_events` request, and then if we still have missing prev_events,
we set up new backwards-extremities. To do that, we need to make a `/state_ids` request to ask the remote
server for the state at those prev_events, and then we may need to then ask the remote server for any events
in that state which we don't already have, as well as the auth events for those missing state events, so that we
can auth them.
This PR attempts to optimise the processing of that state request. The `state_ids` API returns a list of the state
events, as well as a list of all the auth events for *all* of those state events. The optimisation comes from the
observation that we are currently loading all of those auth events into memory at the start of the operation, but
we almost certainly aren't going to need *all* of the auth events. Rather, we can check that we have them, and
leave the actual load into memory for later. (Ideally the federation API would tell us which auth events we're
actually going to need, but it doesn't.)
The effect of this is to reduce the number of events that I need to load for an event in Matrix HQ from about
60000 to about 22000, which means it can stay in my in-memory cache, whereas previously the sheer number
of events meant that all 60K events had to be loaded from db for each request, due to the amount of cache
churn. (NB I've already tripled the size of the cache from its default of 10K).
Unfortunately I've ended up basically C&Ping `_get_state_for_room` and `_get_events_from_store_or_dest` into
a new method, because `_get_state_for_room` is also called during backfill, which expects the auth events to be
returned, so the same tricks don't work. That said, I don't really know why that codepath is completely different
(ultimately we're doing the same thing in setting up a new backwards extremity) so I've left a TODO suggesting
that we clean it up.
2021-03-15 09:51:02 -04:00
|
|
|
# if the event cache contains the event, obviously we've seen it.
|
|
|
|
results = {x for x in event_ids if self._get_event_cache.contains(x)}
|
2018-07-26 08:31:59 -04:00
|
|
|
|
|
|
|
def have_seen_events_txn(txn, chunk):
|
2019-10-02 14:07:07 -04:00
|
|
|
sql = "SELECT event_id FROM events as e WHERE "
|
|
|
|
clause, args = make_in_list_sql_clause(
|
|
|
|
txn.database_engine, "e.event_id", chunk
|
2018-07-26 08:31:59 -04:00
|
|
|
)
|
2019-10-02 14:07:07 -04:00
|
|
|
txn.execute(sql + clause, args)
|
Optimise missing prev_event handling (#9601)
Background: When we receive incoming federation traffic, and notice that we are missing prev_events from
the incoming traffic, first we do a `/get_missing_events` request, and then if we still have missing prev_events,
we set up new backwards-extremities. To do that, we need to make a `/state_ids` request to ask the remote
server for the state at those prev_events, and then we may need to then ask the remote server for any events
in that state which we don't already have, as well as the auth events for those missing state events, so that we
can auth them.
This PR attempts to optimise the processing of that state request. The `state_ids` API returns a list of the state
events, as well as a list of all the auth events for *all* of those state events. The optimisation comes from the
observation that we are currently loading all of those auth events into memory at the start of the operation, but
we almost certainly aren't going to need *all* of the auth events. Rather, we can check that we have them, and
leave the actual load into memory for later. (Ideally the federation API would tell us which auth events we're
actually going to need, but it doesn't.)
The effect of this is to reduce the number of events that I need to load for an event in Matrix HQ from about
60000 to about 22000, which means it can stay in my in-memory cache, whereas previously the sheer number
of events meant that all 60K events had to be loaded from db for each request, due to the amount of cache
churn. (NB I've already tripled the size of the cache from its default of 10K).
Unfortunately I've ended up basically C&Ping `_get_state_for_room` and `_get_events_from_store_or_dest` into
a new method, because `_get_state_for_room` is also called during backfill, which expects the auth events to be
returned, so the same tricks don't work. That said, I don't really know why that codepath is completely different
(ultimately we're doing the same thing in setting up a new backwards extremity) so I've left a TODO suggesting
that we clean it up.
2021-03-15 09:51:02 -04:00
|
|
|
results.update(row[0] for row in txn)
|
2018-07-26 08:31:59 -04:00
|
|
|
|
        for chunk in batch_iter((x for x in event_ids if x not in results), 100):
            await self.db_pool.runInteraction(
                "have_seen_events", have_seen_events_txn, chunk
            )
        return results

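    # A minimal usage sketch (hypothetical caller code, not part of this class):
    # filter an incoming batch of event IDs down to the ones we have not yet
    # processed. `store` stands for an instance exposing this method.
    #
    #     seen = await store.have_seen_events(event_ids)
    #     missing = [event_id for event_id in event_ids if event_id not in seen]
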
    def _get_current_state_event_counts_txn(self, txn, room_id):
        """
        See get_current_state_event_counts.
        """
        sql = "SELECT COUNT(*) FROM current_state_events WHERE room_id=?"
        txn.execute(sql, (room_id,))
        row = txn.fetchone()
        return row[0] if row else 0

    async def get_current_state_event_counts(self, room_id: str) -> int:
        """
        Gets the current number of state events in a room.

        Args:
            room_id: The room ID to query.

        Returns:
            The current number of state events.
        """
        return await self.db_pool.runInteraction(
            "get_current_state_event_counts",
            self._get_current_state_event_counts_txn,
            room_id,
        )

    async def get_room_complexity(self, room_id):
        """
        Get a rough approximation of the complexity of the room. This is used by
        remote servers to decide whether they wish to join the room or not.
        Higher complexity value indicates that being in the room will consume
        more resources.

        Args:
            room_id (str)

        Returns:
            dict[str:int] of complexity version to complexity.
        """
        state_events = await self.get_current_state_event_counts(room_id)

        # Call this one "v1", so we can introduce new ones as we want to develop
        # it.
        complexity_v1 = round(state_events / 500, 2)

        return {"v1": complexity_v1}

    def get_current_events_token(self):
        """The current maximum token that events have reached"""
        return self._stream_id_gen.get_current_token()

    async def get_all_new_forward_event_rows(
        self, instance_name: str, last_id: int, current_id: int, limit: int
    ) -> List[Tuple]:
        """Returns new events, for the Events replication stream

        Args:
            last_id: the last stream_id from the previous batch.
            current_id: the maximum stream_id to return up to
            limit: the maximum number of rows to return

        Returns:
            a list of events stream rows. Each tuple consists of a stream id as
            the first element, followed by fields suitable for casting into an
            EventsStreamRow.
        """

        def get_all_new_forward_event_rows(txn):
            sql = (
                "SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
                " state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL"
                " FROM events AS e"
                " LEFT JOIN redactions USING (event_id)"
                " LEFT JOIN state_events USING (event_id)"
                " LEFT JOIN event_relations USING (event_id)"
                " LEFT JOIN room_memberships USING (event_id)"
                " LEFT JOIN rejections USING (event_id)"
                " WHERE ? < stream_ordering AND stream_ordering <= ?"
                " AND instance_name = ?"
                " ORDER BY stream_ordering ASC"
                " LIMIT ?"
            )
            txn.execute(sql, (last_id, current_id, instance_name, limit))
            return txn.fetchall()

        return await self.db_pool.runInteraction(
            "get_all_new_forward_event_rows", get_all_new_forward_event_rows
        )

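    # Illustrative shape of one returned row (hypothetical values): the stream
    # id comes first, then the fields an EventsStreamRow expects. For a plain
    # message event the state/redaction/relation/membership columns are NULL
    # and the final column (rejections.reason IS NOT NULL) is falsy:
    #
    #     (12345, "$ev:example.org", "!room:example.org", "m.room.message",
    #      None, None, None, None, False)
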
    async def get_ex_outlier_stream_rows(
        self, instance_name: str, last_id: int, current_id: int
    ) -> List[Tuple]:
        """Returns de-outliered events, for the Events replication stream

        Args:
            last_id: the last stream_id from the previous batch.
            current_id: the maximum stream_id to return up to

        Returns:
            a list of events stream rows. Each tuple consists of a stream id as
            the first element, followed by fields suitable for casting into an
            EventsStreamRow.
        """

        def get_ex_outlier_stream_rows_txn(txn):
            sql = (
                "SELECT event_stream_ordering, e.event_id, e.room_id, e.type,"
                " state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL"
                " FROM events AS e"
                " INNER JOIN ex_outlier_stream AS out USING (event_id)"
                " LEFT JOIN redactions USING (event_id)"
                " LEFT JOIN state_events USING (event_id)"
                " LEFT JOIN event_relations USING (event_id)"
                " LEFT JOIN room_memberships USING (event_id)"
                " LEFT JOIN rejections USING (event_id)"
                " WHERE ? < event_stream_ordering"
                " AND event_stream_ordering <= ?"
                " AND out.instance_name = ?"
                " ORDER BY event_stream_ordering ASC"
            )

            txn.execute(sql, (last_id, current_id, instance_name))
            return txn.fetchall()

        return await self.db_pool.runInteraction(
            "get_ex_outlier_stream_rows", get_ex_outlier_stream_rows_txn
        )

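    # Reading aid (roughly speaking): "de-outliered" means an event that was
    # first persisted as an outlier, i.e. without a position in the room's
    # timeline, and has since been given a stream ordering. The
    # ex_outlier_stream table joined above records that transition, keyed by
    # event_id.
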
    async def get_all_new_backfill_event_rows(
        self, instance_name: str, last_id: int, current_id: int, limit: int
    ) -> Tuple[List[Tuple[int, list]], int, bool]:
        """Get updates for backfill replication stream, including all new
        backfilled events and events that have gone from being outliers to not.

        NOTE: The IDs given here are from replication, and so should be
        *positive*.

        Args:
            instance_name: The writer we want to fetch updates from. Unused
                here since there is only ever one writer.
            last_id: The token to fetch updates from. Exclusive.
            current_id: The token to fetch updates up to. Inclusive.
            limit: The requested limit for the number of rows to return. The
                function may return more or fewer rows.

        Returns:
            A tuple consisting of: the updates, a token to use to fetch
            subsequent updates, and whether we returned fewer rows than exists
            between the requested tokens due to the limit.

            The token returned can be used in a subsequent call to this
            function to get further updates.

            The updates are a list of 2-tuples of stream ID and the row data
        """
        if last_id == current_id:
            return [], current_id, False

        def get_all_new_backfill_event_rows(txn):
            sql = (
                "SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
                " state_key, redacts, relates_to_id"
                " FROM events AS e"
                " LEFT JOIN redactions USING (event_id)"
                " LEFT JOIN state_events USING (event_id)"
                " LEFT JOIN event_relations USING (event_id)"
                " WHERE ? > stream_ordering AND stream_ordering >= ?"
                " AND instance_name = ?"
                " ORDER BY stream_ordering ASC"
                " LIMIT ?"
            )
            txn.execute(sql, (-last_id, -current_id, instance_name, limit))
            new_event_updates = [(row[0], row[1:]) for row in txn]

            limited = False
            if len(new_event_updates) == limit:
                upper_bound = new_event_updates[-1][0]
                limited = True
            else:
                upper_bound = current_id

            sql = (
                "SELECT -event_stream_ordering, e.event_id, e.room_id, e.type,"
                " state_key, redacts, relates_to_id"
                " FROM events AS e"
                " INNER JOIN ex_outlier_stream AS out USING (event_id)"
                " LEFT JOIN redactions USING (event_id)"
                " LEFT JOIN state_events USING (event_id)"
                " LEFT JOIN event_relations USING (event_id)"
                " WHERE ? > event_stream_ordering"
                " AND event_stream_ordering >= ?"
                " AND out.instance_name = ?"
                " ORDER BY event_stream_ordering DESC"
            )
            txn.execute(sql, (-last_id, -upper_bound, instance_name))
            new_event_updates.extend((row[0], row[1:]) for row in txn)

            if len(new_event_updates) >= limit:
                upper_bound = new_event_updates[-1][0]
                limited = True

            return new_event_updates, upper_bound, limited

        return await self.db_pool.runInteraction(
            "get_all_new_backfill_event_rows", get_all_new_backfill_event_rows
        )

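    # Note on the sign flipping above (a reading aid, not a behaviour change):
    # backfilled events are stored with *negative* stream orderings, whereas
    # replication tokens are positive. The queries therefore negate the
    # incoming tokens before comparison and negate the selected ordering on
    # the way out, e.g. a stored stream_ordering of -7 is surfaced to the
    # replication stream as the positive id 7.
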
    async def get_all_updated_current_state_deltas(
        self, instance_name: str, from_token: int, to_token: int, target_row_count: int
    ) -> Tuple[List[Tuple], int, bool]:
        """Fetch updates from current_state_delta_stream

        Args:
            from_token: The previous stream token. Updates from this stream id will
                be excluded.

            to_token: The current stream token (ie the upper limit). Updates up to this
                stream id will be included (modulo the 'limit' param)

            target_row_count: The number of rows to try to return. If more rows are
                available, we will set 'limited' in the result. In the event of a large
                batch, we may return more rows than this.
        Returns:
            A triplet `(updates, new_last_token, limited)`, where:
               * `updates` is a list of database tuples.
               * `new_last_token` is the new position in stream.
               * `limited` is whether there are more updates to fetch.
        """

        def get_all_updated_current_state_deltas_txn(txn):
            sql = """
                SELECT stream_id, room_id, type, state_key, event_id
                FROM current_state_delta_stream
                WHERE ? < stream_id AND stream_id <= ?
                AND instance_name = ?
                ORDER BY stream_id ASC LIMIT ?
            """
            txn.execute(sql, (from_token, to_token, instance_name, target_row_count))
            return txn.fetchall()

        def get_deltas_for_stream_id_txn(txn, stream_id):
            sql = """
                SELECT stream_id, room_id, type, state_key, event_id
                FROM current_state_delta_stream
                WHERE stream_id = ?
            """
            txn.execute(sql, [stream_id])
            return txn.fetchall()

        # we need to make sure that, for every stream id in the results, we get *all*
        # the rows with that stream id.

        rows = await self.db_pool.runInteraction(
            "get_all_updated_current_state_deltas",
            get_all_updated_current_state_deltas_txn,
        )  # type: List[Tuple]

        # if we've got fewer rows than the limit, we're good
        if len(rows) < target_row_count:
            return rows, to_token, False

        # we hit the limit, so reduce the upper limit so that we exclude the stream id
        # of the last row in the result.
        assert rows[-1][0] <= to_token
        to_token = rows[-1][0] - 1

        # search backwards through the list for the point to truncate
        for idx in range(len(rows) - 1, 0, -1):
            if rows[idx - 1][0] <= to_token:
                return rows[:idx], to_token, True

        # bother. We didn't get a full set of changes for even a single
        # stream id. let's run the query again, without a row limit, but for
        # just one stream id.
        to_token += 1
        rows = await self.db_pool.runInteraction(
            "get_deltas_for_stream_id", get_deltas_for_stream_id_txn, to_token
        )

        return rows, to_token, True

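    # Worked example of the truncation above (illustrative values): with
    # target_row_count=4 and a first query returning rows with stream ids
    # [7, 8, 8, 9], we hit the limit and cannot be sure we saw every row for
    # stream id 9. So to_token becomes 8, the backwards search keeps only the
    # rows with ids [7, 8, 8], and we return them with limited=True. Only if
    # every returned row shared a single stream id do we fall through to the
    # second, unlimited query for just that id.
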
    async def is_event_after(self, event_id1, event_id2):
        """Returns True if event_id1 is after event_id2 in the stream"""
        to_1, so_1 = await self.get_event_ordering(event_id1)
        to_2, so_2 = await self.get_event_ordering(event_id2)
        return (to_1, so_1) > (to_2, so_2)

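    # The comparison is lexicographic on (topological_ordering,
    # stream_ordering): e.g. (4, 1) > (3, 99) and (3, 10) > (3, 7), so the
    # topological order wins and the stream order only breaks ties.
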
    @cached(max_entries=5000)
    async def get_event_ordering(self, event_id):
        res = await self.db_pool.simple_select_one(
            table="events",
            retcols=["topological_ordering", "stream_ordering"],
            keyvalues={"event_id": event_id},
            allow_none=True,
        )

        if not res:
            raise SynapseError(404, "Could not find event %s" % (event_id,))

        return (int(res["topological_ordering"]), int(res["stream_ordering"]))

    async def get_next_event_to_expire(self) -> Optional[Tuple[str, int]]:
        """Retrieve the entry with the lowest expiry timestamp in the event_expiry
        table, or None if there are no more events to expire.

        Returns:
            A tuple containing the event ID as its first element and an expiry timestamp
            as its second one, if there's at least one row in the event_expiry table.
            None otherwise.
        """

        def get_next_event_to_expire_txn(txn):
            txn.execute(
                """
                SELECT event_id, expiry_ts FROM event_expiry
                ORDER BY expiry_ts ASC LIMIT 1
                """
            )

            return txn.fetchone()

        return await self.db_pool.runInteraction(
            desc="get_next_event_to_expire", func=get_next_event_to_expire_txn
        )

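    # A minimal usage sketch (hypothetical expiry loop, not part of this
    # class; `store` and `clock` are illustrative names):
    #
    #     entry = await store.get_next_event_to_expire()
    #     if entry is not None:
    #         event_id, expiry_ts = entry
    #         delay_ms = max(0, expiry_ts - clock.time_msec())
    #         # ...schedule expiry of event_id after delay_ms...
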
    async def get_event_id_from_transaction_id(
        self, room_id: str, user_id: str, token_id: int, txn_id: str
    ) -> Optional[str]:
        """Look up if we have already persisted an event for the transaction ID,
        returning the event ID if so.
        """
        return await self.db_pool.simple_select_one_onecol(
            table="event_txn_id",
            keyvalues={
                "room_id": room_id,
                "user_id": user_id,
                "token_id": token_id,
                "txn_id": txn_id,
            },
            retcol="event_id",
            allow_none=True,
            desc="get_event_id_from_transaction_id",
        )

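    # A minimal usage sketch (hypothetical caller; the attribute names on
    # `requester` are illustrative): a client retrying a send with the same
    # transaction ID gets the originally persisted event back rather than a
    # duplicate.
    #
    #     existing = await store.get_event_id_from_transaction_id(
    #         room_id, requester.user.to_string(), requester.access_token_id, txn_id
    #     )
    #     if existing is not None:
    #         return existing  # deduplicated
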
    async def get_already_persisted_events(
        self, events: Iterable[EventBase]
    ) -> Dict[str, str]:
        """Look up if we have already persisted an event for the transaction ID,
        returning a mapping from event ID in the given list to the event ID of
        an existing event.

        Also checks if there are duplicates in the given events; if there are,
        duplicates are mapped to the *first* event.
        """

        mapping = {}
        txn_id_to_event = {}  # type: Dict[Tuple[str, int, str], str]

        for event in events:
            token_id = getattr(event.internal_metadata, "token_id", None)
            txn_id = getattr(event.internal_metadata, "txn_id", None)

            if token_id and txn_id:
                # Check if this is a duplicate of an event in the given events.
                existing = txn_id_to_event.get((event.room_id, token_id, txn_id))
                if existing:
                    mapping[event.event_id] = existing
                    continue

                # Check if this is a duplicate of an event we've already
                # persisted.
                existing = await self.get_event_id_from_transaction_id(
                    event.room_id, event.sender, token_id, txn_id
                )
                if existing:
                    mapping[event.event_id] = existing
                    txn_id_to_event[(event.room_id, token_id, txn_id)] = existing
                else:
                    txn_id_to_event[(event.room_id, token_id, txn_id)] = event.event_id

        return mapping

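    # Worked example (illustrative IDs): given events A and B in the same room
    # that both carry (token_id=1, txn_id="t1"), with no previously persisted
    # match, A is recorded as the first event for that transaction ID and B is
    # mapped to it, so the method returns {B.event_id: A.event_id}; A itself
    # does not appear in the mapping.
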
    @wrap_as_background_process("_cleanup_old_transaction_ids")
    async def _cleanup_old_transaction_ids(self):
        """Cleans out transaction id mappings older than 24hrs."""

        def _cleanup_old_transaction_ids_txn(txn):
            sql = """
                DELETE FROM event_txn_id
                WHERE inserted_ts < ?
            """
            one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000
            txn.execute(sql, (one_day_ago,))

        return await self.db_pool.runInteraction(
            "_cleanup_old_transaction_ids",
            _cleanup_old_transaction_ids_txn,
        )