Add type hints to synapse/storage/databases/main/events_worker.py (#11411)

Also refactor the stream ID trackers/generators a bit and try to document them better.

commit ffd858aa68 (parent 1d8b80b334)
13 changed files with 255 additions and 171 deletions
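The distinction the commit message draws is between *tracking* stream IDs (readers and replication followers only need to know how far a stream has advanced) and *generating* them (writers also mint new IDs). A minimal sketch of the tracker half, assuming only the interface suggested by the `AbstractStreamIdTracker` name introduced in the diff below; the bodies here are illustrative, not Synapse's implementation:

import abc
from typing import Dict


class AbstractStreamIdTracker(abc.ABC):
    """Anything that can report how far a replicated stream has advanced."""

    @abc.abstractmethod
    def advance(self, instance_name: str, new_id: int) -> None:
        """Record that `instance_name` has persisted up to `new_id`."""

    @abc.abstractmethod
    def get_current_token(self) -> int:
        """Return the highest stream ID that is safe to read up to."""


class ToyIdTracker(AbstractStreamIdTracker):
    """Toy tracker: remembers the furthest position per writer and reports
    the minimum, i.e. the point every known writer has reached."""

    def __init__(self) -> None:
        self._positions: Dict[str, int] = {}

    def advance(self, instance_name: str, new_id: int) -> None:
        self._positions[instance_name] = max(
            self._positions.get(instance_name, 0), new_id
        )

    def get_current_token(self) -> int:
        return min(self._positions.values(), default=0)


tracker = ToyIdTracker()
tracker.advance("writer1", 10)
tracker.advance("writer2", 7)
print(tracker.get_current_token())  # 7

Only the diff below is authoritative: `MultiWriterIdGenerator` and `StreamIdGenerator` are the concrete classes the store actually chooses between.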
synapse/storage/databases/main/events_worker.py

@@ -15,14 +15,18 @@
 import logging
 import threading
 from typing import (
     TYPE_CHECKING,
+    Any,
+    Collection,
     Container,
     Dict,
     Iterable,
     List,
+    NoReturn,
     Optional,
     Set,
     Tuple,
+    cast,
     overload,
 )

@@ -38,6 +42,7 @@ from synapse.api.errors import NotFoundError, SynapseError
 from synapse.api.room_versions import (
     KNOWN_ROOM_VERSIONS,
     EventFormatVersions,
+    RoomVersion,
     RoomVersions,
 )
 from synapse.events import EventBase, make_event_from_dict

@@ -56,10 +61,18 @@ from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
 from synapse.replication.tcp.streams import BackfillStream
 from synapse.replication.tcp.streams.events import EventsStream
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
-from synapse.storage.database import DatabasePool, LoggingTransaction
+from synapse.storage.database import (
+    DatabasePool,
+    LoggingDatabaseConnection,
+    LoggingTransaction,
+)
 from synapse.storage.engines import PostgresEngine
-from synapse.storage.types import Connection
-from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
+from synapse.storage.types import Cursor
+from synapse.storage.util.id_generators import (
+    AbstractStreamIdTracker,
+    MultiWriterIdGenerator,
+    StreamIdGenerator,
+)
 from synapse.storage.util.sequence import build_sequence_generator
 from synapse.types import JsonDict, get_domain_from_id
 from synapse.util import unwrapFirstError

@@ -69,6 +82,9 @@ from synapse.util.caches.lrucache import LruCache
 from synapse.util.iterutils import batch_iter
 from synapse.util.metrics import Measure

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)


@@ -89,7 +105,7 @@ event_fetch_ongoing_gauge = Gauge(


 @attr.s(slots=True, auto_attribs=True)
-class _EventCacheEntry:
+class EventCacheEntry:
     event: EventBase
     redacted_event: Optional[EventBase]


@@ -129,7 +145,7 @@ class _EventRow:
     json: str
     internal_metadata: str
     format_version: Optional[int]
-    room_version_id: Optional[int]
+    room_version_id: Optional[str]
     rejected_reason: Optional[str]
     redactions: List[str]
     outlier: bool

@@ -153,9 +169,16 @@ class EventsWorkerStore(SQLBaseStore):
     # options controlling this.
     USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = True

-    def __init__(self, database: DatabasePool, db_conn, hs):
+    def __init__(
+        self,
+        database: DatabasePool,
+        db_conn: LoggingDatabaseConnection,
+        hs: "HomeServer",
+    ):
         super().__init__(database, db_conn, hs)

+        self._stream_id_gen: AbstractStreamIdTracker
+        self._backfill_id_gen: AbstractStreamIdTracker
         if isinstance(database.engine, PostgresEngine):
             # If we're using Postgres than we can use `MultiWriterIdGenerator`
             # regardless of whether this process writes to the streams or not.
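A side note on the two bare annotations added to `__init__` above: annotating an attribute without assigning it gives mypy a single declared type for `self._stream_id_gen`, even though the Postgres and SQLite branches then assign different concrete classes. A reduced sketch of the same pattern, with hypothetical classes standing in for the real generators:

class IdTracker:
    def get_current_token(self) -> int:
        raise NotImplementedError()


class MultiWriterTracker(IdTracker):
    def get_current_token(self) -> int:
        return 42


class SingleWriterTracker(IdTracker):
    def get_current_token(self) -> int:
        return 7


class Store:
    def __init__(self, multi_writer: bool) -> None:
        # Bare annotation: no value yet, but mypy now knows the attribute's
        # type regardless of which branch assigns it below.
        self._id_gen: IdTracker
        if multi_writer:
            self._id_gen = MultiWriterTracker()
        else:
            self._id_gen = SingleWriterTracker()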
@@ -214,7 +237,7 @@ class EventsWorkerStore(SQLBaseStore):
             5 * 60 * 1000,
         )

-        self._get_event_cache = LruCache(
+        self._get_event_cache: LruCache[Tuple[str], EventCacheEntry] = LruCache(
             cache_name="*getEvent*",
             max_size=hs.config.caches.event_cache_size,
         )

@@ -223,19 +246,21 @@ class EventsWorkerStore(SQLBaseStore):
         # ID to cache entry. Note that the returned dict may not have the
         # requested event in it if the event isn't in the DB.
         self._current_event_fetches: Dict[
-            str, ObservableDeferred[Dict[str, _EventCacheEntry]]
+            str, ObservableDeferred[Dict[str, EventCacheEntry]]
         ] = {}

         self._event_fetch_lock = threading.Condition()
-        self._event_fetch_list = []
+        self._event_fetch_list: List[
+            Tuple[Iterable[str], "defer.Deferred[Dict[str, _EventRow]]"]
+        ] = []
         self._event_fetch_ongoing = 0
         event_fetch_ongoing_gauge.set(self._event_fetch_ongoing)

         # We define this sequence here so that it can be referenced from both
         # the DataStore and PersistEventStore.
-        def get_chain_id_txn(txn):
+        def get_chain_id_txn(txn: Cursor) -> int:
             txn.execute("SELECT COALESCE(max(chain_id), 0) FROM event_auth_chains")
-            return txn.fetchone()[0]
+            return cast(Tuple[int], txn.fetchone())[0]

         self.event_chain_id_gen = build_sequence_generator(
             db_conn,
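The `cast` added to `get_chain_id_txn` above is needed because a DB-API cursor's `fetchone()` is typed roughly as `Optional[Tuple[Any, ...]]`, so indexing its result directly fails type checking even though a `COALESCE` aggregate always yields exactly one row. A self-contained sqlite3 sketch of the same pattern:

import sqlite3
from typing import Tuple, cast

conn = sqlite3.connect(":memory:")
txn = conn.cursor()
txn.execute("CREATE TABLE event_auth_chains (chain_id INTEGER)")
txn.execute("SELECT COALESCE(MAX(chain_id), 0) FROM event_auth_chains")

# fetchone() is typed Optional[Tuple[Any, ...]]; the cast records the fact
# that this aggregate query always produces one row.
row = cast(Tuple[int], txn.fetchone())
print(row[0])  # 0: the table is empty, COALESCE supplies the default

Note that `cast` has no runtime effect; it only informs the type checker.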
@@ -246,7 +271,13 @@ class EventsWorkerStore(SQLBaseStore):
             id_column="chain_id",
         )

-    def process_replication_rows(self, stream_name, instance_name, token, rows):
+    def process_replication_rows(
+        self,
+        stream_name: str,
+        instance_name: str,
+        token: int,
+        rows: Iterable[Any],
+    ) -> None:
         if stream_name == EventsStream.NAME:
             self._stream_id_gen.advance(instance_name, token)
         elif stream_name == BackfillStream.NAME:

@@ -280,10 +311,10 @@ class EventsWorkerStore(SQLBaseStore):
         self,
         event_id: str,
         redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
-        get_prev_content: bool = False,
-        allow_rejected: bool = False,
-        allow_none: Literal[False] = False,
-        check_room_id: Optional[str] = None,
+        get_prev_content: bool = ...,
+        allow_rejected: bool = ...,
+        allow_none: Literal[False] = ...,
+        check_room_id: Optional[str] = ...,
     ) -> EventBase:
         ...

@@ -292,10 +323,10 @@ class EventsWorkerStore(SQLBaseStore):
         self,
         event_id: str,
         redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
-        get_prev_content: bool = False,
-        allow_rejected: bool = False,
-        allow_none: Literal[True] = False,
-        check_room_id: Optional[str] = None,
+        get_prev_content: bool = ...,
+        allow_rejected: bool = ...,
+        allow_none: Literal[True] = ...,
+        check_room_id: Optional[str] = ...,
     ) -> Optional[EventBase]:
         ...

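These two overload hunks also fix a latent annotation bug: the second overload previously declared `allow_none: Literal[True] = False`, and `False` is not a valid value of `Literal[True]`. Using `...` as the placeholder default is the standard convention for `@overload` stubs; the real defaults live on the single implementation. A reduced sketch of the pattern (toy function, not Synapse's `get_event`):

from typing import Literal, Optional, overload


@overload
def get_event(event_id: str, allow_none: Literal[False] = ...) -> str:
    ...


@overload
def get_event(event_id: str, allow_none: Literal[True] = ...) -> Optional[str]:
    ...


def get_event(event_id: str, allow_none: bool = False) -> Optional[str]:
    event = {"$ev1": "event body"}.get(event_id)
    if event is None and not allow_none:
        raise KeyError(event_id)
    return event


e = get_event("$ev1")                         # checker sees: str
maybe_e = get_event("$ev2", allow_none=True)  # checker sees: Optional[str]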
@@ -357,7 +388,7 @@ class EventsWorkerStore(SQLBaseStore):

     async def get_events(
         self,
-        event_ids: Iterable[str],
+        event_ids: Collection[str],
         redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
         get_prev_content: bool = False,
         allow_rejected: bool = False,

@@ -544,7 +575,7 @@ class EventsWorkerStore(SQLBaseStore):

     async def _get_events_from_cache_or_db(
         self, event_ids: Iterable[str], allow_rejected: bool = False
-    ) -> Dict[str, _EventCacheEntry]:
+    ) -> Dict[str, EventCacheEntry]:
         """Fetch a bunch of events from the cache or the database.

         If events are pulled from the database, they will be cached for future lookups.

@@ -578,7 +609,7 @@ class EventsWorkerStore(SQLBaseStore):
         # same dict into itself N times).
         already_fetching_ids: Set[str] = set()
         already_fetching_deferreds: Set[
-            ObservableDeferred[Dict[str, _EventCacheEntry]]
+            ObservableDeferred[Dict[str, EventCacheEntry]]
         ] = set()

         for event_id in missing_events_ids:

@@ -601,7 +632,7 @@ class EventsWorkerStore(SQLBaseStore):
         # function returning more events than requested, but that can happen
         # already due to `_get_events_from_db`).
         fetching_deferred: ObservableDeferred[
-            Dict[str, _EventCacheEntry]
+            Dict[str, EventCacheEntry]
         ] = ObservableDeferred(defer.Deferred(), consumeErrors=True)
         for event_id in missing_events_ids:
             self._current_event_fetches[event_id] = fetching_deferred

@@ -658,12 +689,12 @@ class EventsWorkerStore(SQLBaseStore):

         return event_entry_map

-    def _invalidate_get_event_cache(self, event_id):
+    def _invalidate_get_event_cache(self, event_id: str) -> None:
         self._get_event_cache.invalidate((event_id,))

     def _get_events_from_cache(
         self, events: Iterable[str], update_metrics: bool = True
-    ) -> Dict[str, _EventCacheEntry]:
+    ) -> Dict[str, EventCacheEntry]:
         """Fetch events from the caches.

         May return rejected events.

@@ -820,7 +851,7 @@ class EventsWorkerStore(SQLBaseStore):
         for _, deferred in event_fetches_to_fail:
             deferred.errback(exc)

-    def _fetch_loop(self, conn: Connection) -> None:
+    def _fetch_loop(self, conn: LoggingDatabaseConnection) -> None:
         """Takes a database connection and waits for requests for events from
         the _event_fetch_list queue.
         """

@@ -850,7 +881,9 @@ class EventsWorkerStore(SQLBaseStore):
             self._fetch_event_list(conn, event_list)

     def _fetch_event_list(
-        self, conn: Connection, event_list: List[Tuple[List[str], defer.Deferred]]
+        self,
+        conn: LoggingDatabaseConnection,
+        event_list: List[Tuple[Iterable[str], "defer.Deferred[Dict[str, _EventRow]]"]],
     ) -> None:
         """Handle a load of requests from the _event_fetch_list queue

@@ -877,7 +910,7 @@ class EventsWorkerStore(SQLBaseStore):
             )

             # We only want to resolve deferreds from the main thread
-            def fire():
+            def fire() -> None:
                 for _, d in event_list:
                     d.callback(row_dict)

@@ -887,16 +920,16 @@ class EventsWorkerStore(SQLBaseStore):
             logger.exception("do_fetch")

             # We only want to resolve deferreds from the main thread
-            def fire(evs, exc):
-                for _, d in evs:
+            def fire_errback(exc: Exception) -> None:
+                for _, d in event_list:
                     d.errback(exc)

             with PreserveLoggingContext():
-                self.hs.get_reactor().callFromThread(fire, event_list, e)
+                self.hs.get_reactor().callFromThread(fire_errback, e)

     async def _get_events_from_db(
-        self, event_ids: Iterable[str]
-    ) -> Dict[str, _EventCacheEntry]:
+        self, event_ids: Collection[str]
+    ) -> Dict[str, EventCacheEntry]:
         """Fetch a bunch of events from the database.

         May return rejected events.
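The `fire` to `fire_errback` change above is more than a rename: the helper now closes over `event_list` from the enclosing scope, so only the exception needs to travel through `callFromThread`, and the parameter list can be typed precisely. A toy sketch of the closure shape, with stand-in classes rather than real Twisted objects:

from typing import Callable, List, Tuple


class FakeDeferred:
    def errback(self, exc: Exception) -> None:
        print(f"failing with {exc!r}")


def call_from_thread(fn: Callable[..., None], *args: object) -> None:
    fn(*args)  # stand-in for reactor.callFromThread


event_list: List[Tuple[List[str], FakeDeferred]] = [(["$ev1"], FakeDeferred())]


def fire_errback(exc: Exception) -> None:
    # `event_list` is captured from the enclosing scope, not passed in
    for _, d in event_list:
        d.errback(exc)


call_from_thread(fire_errback, RuntimeError("db fetch failed"))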
@@ -912,29 +945,29 @@ class EventsWorkerStore(SQLBaseStore):
             map from event id to result. May return extra events which
             weren't asked for.
         """
-        fetched_events = {}
+        fetched_event_ids: Set[str] = set()
+        fetched_events: Dict[str, _EventRow] = {}
         events_to_fetch = event_ids

         while events_to_fetch:
             row_map = await self._enqueue_events(events_to_fetch)

             # we need to recursively fetch any redactions of those events
-            redaction_ids = set()
+            redaction_ids: Set[str] = set()
             for event_id in events_to_fetch:
                 row = row_map.get(event_id)
-                fetched_events[event_id] = row
+                fetched_event_ids.add(event_id)
                 if row:
+                    fetched_events[event_id] = row
                     redaction_ids.update(row.redactions)

-            events_to_fetch = redaction_ids.difference(fetched_events.keys())
+            events_to_fetch = redaction_ids.difference(fetched_event_ids)
             if events_to_fetch:
                 logger.debug("Also fetching redaction events %s", events_to_fetch)

         # build a map from event_id to EventBase
-        event_map = {}
+        event_map: Dict[str, EventBase] = {}
         for event_id, row in fetched_events.items():
-            if not row:
-                continue
             assert row.event_id == event_id

             rejected_reason = row.rejected_reason
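The hunk above removes the `None` placeholders the old code stored in `fetched_events`: with a separate `fetched_event_ids` set recording which IDs have been attempted, the dict's value type stays a plain `_EventRow` and the later loop no longer needs the `if not row` guard. A runnable toy version of the same loop, with a hypothetical `Row` type in place of `_EventRow`:

from typing import Dict, Optional, Set


class Row:
    def __init__(self, event_id: str, redactions: Set[str]) -> None:
        self.event_id = event_id
        self.redactions = redactions


def fetch_batch(ids: Set[str]) -> Dict[str, Row]:
    # stand-in for _enqueue_events: returns only the rows that exist
    db = {"$a": Row("$a", {"$r"}), "$r": Row("$r", set())}
    return {i: db[i] for i in ids if i in db}


fetched_event_ids: Set[str] = set()
fetched_events: Dict[str, Row] = {}  # values are never None
events_to_fetch: Set[str] = {"$a", "$missing"}

while events_to_fetch:
    row_map = fetch_batch(events_to_fetch)
    redaction_ids: Set[str] = set()
    for event_id in events_to_fetch:
        row: Optional[Row] = row_map.get(event_id)
        fetched_event_ids.add(event_id)  # remember we tried, found or not
        if row:
            fetched_events[event_id] = row
            redaction_ids.update(row.redactions)
    # only chase redactions we haven't already attempted
    events_to_fetch = redaction_ids.difference(fetched_event_ids)

print(sorted(fetched_events))  # ['$a', '$r']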
@@ -962,6 +995,7 @@ class EventsWorkerStore(SQLBaseStore):

             room_version_id = row.room_version_id

+            room_version: Optional[RoomVersion]
             if not room_version_id:
                 # this should only happen for out-of-band membership events which
                 # arrived before #6983 landed. For all other events, we should have

@@ -1032,14 +1066,14 @@ class EventsWorkerStore(SQLBaseStore):

         # finally, we can decide whether each one needs redacting, and build
         # the cache entries.
-        result_map = {}
+        result_map: Dict[str, EventCacheEntry] = {}
         for event_id, original_ev in event_map.items():
             redactions = fetched_events[event_id].redactions
             redacted_event = self._maybe_redact_event_row(
                 original_ev, redactions, event_map
             )

-            cache_entry = _EventCacheEntry(
+            cache_entry = EventCacheEntry(
                 event=original_ev, redacted_event=redacted_event
             )

@@ -1048,7 +1082,7 @@ class EventsWorkerStore(SQLBaseStore):

         return result_map

-    async def _enqueue_events(self, events: Iterable[str]) -> Dict[str, _EventRow]:
+    async def _enqueue_events(self, events: Collection[str]) -> Dict[str, _EventRow]:
         """Fetches events from the database using the _event_fetch_list. This
         allows batch and bulk fetching of events - it allows us to fetch events
         without having to create a new transaction for each request for events.

@@ -1061,7 +1095,7 @@ class EventsWorkerStore(SQLBaseStore):
             that weren't requested.
         """

-        events_d = defer.Deferred()
+        events_d: "defer.Deferred[Dict[str, _EventRow]]" = defer.Deferred()
         with self._event_fetch_lock:
             self._event_fetch_list.append((events, events_d))
             self._event_fetch_lock.notify()

@@ -1216,7 +1250,7 @@ class EventsWorkerStore(SQLBaseStore):
             # no valid redaction found for this event
             return None

-    async def have_events_in_timeline(self, event_ids):
+    async def have_events_in_timeline(self, event_ids: Iterable[str]) -> Set[str]:
         """Given a list of event ids, check if we have already processed and
         stored them as non outliers.
         """

@@ -1245,7 +1279,7 @@ class EventsWorkerStore(SQLBaseStore):
             event_ids: events we are looking for

         Returns:
-            set[str]: The events we have already seen.
+            The set of events we have already seen.
         """
         res = await self._have_seen_events_dict(
             (room_id, event_id) for event_id in event_ids

@@ -1268,7 +1302,9 @@ class EventsWorkerStore(SQLBaseStore):
         }
         results = {x: True for x in cache_results}

-        def have_seen_events_txn(txn, chunk: Tuple[Tuple[str, str], ...]):
+        def have_seen_events_txn(
+            txn: LoggingTransaction, chunk: Tuple[Tuple[str, str], ...]
+        ) -> None:
             # we deliberately do *not* query the database for room_id, to make the
             # query an index-only lookup on `events_event_id_key`.
             #

@@ -1294,12 +1330,14 @@ class EventsWorkerStore(SQLBaseStore):
         return results

     @cached(max_entries=100000, tree=True)
-    async def have_seen_event(self, room_id: str, event_id: str):
+    async def have_seen_event(self, room_id: str, event_id: str) -> NoReturn:
         # this only exists for the benefit of the @cachedList descriptor on
         # _have_seen_events_dict
         raise NotImplementedError()

-    def _get_current_state_event_counts_txn(self, txn, room_id):
+    def _get_current_state_event_counts_txn(
+        self, txn: LoggingTransaction, room_id: str
+    ) -> int:
         """
         See get_current_state_event_counts.
         """
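The `NoReturn` annotation above documents that `have_seen_event` always raises: per its comment, it exists only so the `@cachedList` descriptor on `_have_seen_events_dict` has a cached function to attach to. A minimal illustration of what `NoReturn` buys (toy function, not the Synapse one):

from typing import NoReturn


def have_seen_event_stub(room_id: str, event_id: str) -> NoReturn:
    raise NotImplementedError("exists only to host a cache; never call directly")


def example() -> int:
    have_seen_event_stub("!room:example.org", "$event")
    # mypy treats everything after a NoReturn call as unreachable, so the
    # missing `return` here is not flagged as an error.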
@@ -1324,7 +1362,7 @@ class EventsWorkerStore(SQLBaseStore):
             room_id,
         )

-    async def get_room_complexity(self, room_id):
+    async def get_room_complexity(self, room_id: str) -> Dict[str, float]:
         """
         Get a rough approximation of the complexity of the room. This is used by
         remote servers to decide whether they wish to join the room or not.

@@ -1332,10 +1370,10 @@ class EventsWorkerStore(SQLBaseStore):
         more resources.

         Args:
-            room_id (str)
+            room_id: The room ID to query.

         Returns:
-            dict[str:int] of complexity version to complexity.
+            dict[str:float] of complexity version to complexity.
         """
         state_events = await self.get_current_state_event_counts(room_id)

@@ -1345,13 +1383,13 @@ class EventsWorkerStore(SQLBaseStore):

         return {"v1": complexity_v1}

-    def get_current_events_token(self):
+    def get_current_events_token(self) -> int:
         """The current maximum token that events have reached"""
         return self._stream_id_gen.get_current_token()

     async def get_all_new_forward_event_rows(
         self, instance_name: str, last_id: int, current_id: int, limit: int
-    ) -> List[Tuple]:
+    ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]:
         """Returns new events, for the Events replication stream

         Args:

@@ -1365,7 +1403,9 @@ class EventsWorkerStore(SQLBaseStore):
             EventsStreamRow.
         """

-        def get_all_new_forward_event_rows(txn):
+        def get_all_new_forward_event_rows(
+            txn: LoggingTransaction,
+        ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]:
             sql = (
                 "SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
                 " state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL"

@@ -1381,7 +1421,9 @@ class EventsWorkerStore(SQLBaseStore):
                 " LIMIT ?"
             )
             txn.execute(sql, (last_id, current_id, instance_name, limit))
-            return txn.fetchall()
+            return cast(
+                List[Tuple[int, str, str, str, str, str, str, str, str]], txn.fetchall()
+            )

         return await self.db_pool.runInteraction(
             "get_all_new_forward_event_rows", get_all_new_forward_event_rows

@@ -1389,7 +1431,7 @@ class EventsWorkerStore(SQLBaseStore):

     async def get_ex_outlier_stream_rows(
         self, instance_name: str, last_id: int, current_id: int
-    ) -> List[Tuple]:
+    ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]:
         """Returns de-outliered events, for the Events replication stream

         Args:

@@ -1402,7 +1444,9 @@ class EventsWorkerStore(SQLBaseStore):
             EventsStreamRow.
         """

-        def get_ex_outlier_stream_rows_txn(txn):
+        def get_ex_outlier_stream_rows_txn(
+            txn: LoggingTransaction,
+        ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]:
             sql = (
                 "SELECT event_stream_ordering, e.event_id, e.room_id, e.type,"
                 " state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL"

@@ -1420,7 +1464,9 @@ class EventsWorkerStore(SQLBaseStore):
             )

             txn.execute(sql, (last_id, current_id, instance_name))
-            return txn.fetchall()
+            return cast(
+                List[Tuple[int, str, str, str, str, str, str, str, str]], txn.fetchall()
+            )

         return await self.db_pool.runInteraction(
             "get_ex_outlier_stream_rows", get_ex_outlier_stream_rows_txn

@@ -1428,7 +1474,7 @@ class EventsWorkerStore(SQLBaseStore):

     async def get_all_new_backfill_event_rows(
         self, instance_name: str, last_id: int, current_id: int, limit: int
-    ) -> Tuple[List[Tuple[int, list]], int, bool]:
+    ) -> Tuple[List[Tuple[int, Tuple[str, str, str, str, str, str]]], int, bool]:
         """Get updates for backfill replication stream, including all new
         backfilled events and events that have gone from being outliers to not.

@@ -1456,7 +1502,9 @@ class EventsWorkerStore(SQLBaseStore):
         if last_id == current_id:
             return [], current_id, False

-        def get_all_new_backfill_event_rows(txn):
+        def get_all_new_backfill_event_rows(
+            txn: LoggingTransaction,
+        ) -> Tuple[List[Tuple[int, Tuple[str, str, str, str, str, str]]], int, bool]:
             sql = (
                 "SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
                 " state_key, redacts, relates_to_id"

@@ -1470,7 +1518,15 @@ class EventsWorkerStore(SQLBaseStore):
                 " LIMIT ?"
             )
             txn.execute(sql, (-last_id, -current_id, instance_name, limit))
-            new_event_updates = [(row[0], row[1:]) for row in txn]
+            new_event_updates: List[
+                Tuple[int, Tuple[str, str, str, str, str, str]]
+            ] = []
+            row: Tuple[int, str, str, str, str, str, str]
+            # Type safety: iterating over `txn` yields `Tuple`, i.e.
+            # `Tuple[Any, ...]` of arbitrary length. Mypy detects assigning a
+            # variadic tuple to a fixed length tuple and flags it up as an error.
+            for row in txn:  # type: ignore[assignment]
+                new_event_updates.append((row[0], row[1:]))

             limited = False
             if len(new_event_updates) == limit:
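The commented `# type: ignore[assignment]` above works around a real mypy limitation: iterating a cursor yields `Tuple[Any, ...]`, and assigning a variadic tuple to a variable annotated with a fixed-length tuple is rejected. A reduced sqlite3 reproduction, with a plain cursor standing in for `LoggingTransaction`:

import sqlite3
from typing import List, Tuple

conn = sqlite3.connect(":memory:")
txn = conn.cursor()
txn.execute("CREATE TABLE t (stream_ordering INTEGER, event_id TEXT)")
txn.execute("INSERT INTO t VALUES (-1, '$ev')")
txn.execute("SELECT stream_ordering, event_id FROM t")

updates: List[Tuple[int, Tuple[str]]] = []
row: Tuple[int, str]
# cursor rows are Tuple[Any, ...]; the targeted ignore silences only the
# variadic-to-fixed-length assignment error, nothing else.
for row in txn:  # type: ignore[assignment]
    updates.append((row[0], row[1:]))

print(updates)  # [(-1, ('$ev',))]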
@@ -1493,7 +1549,11 @@ class EventsWorkerStore(SQLBaseStore):
                 " ORDER BY event_stream_ordering DESC"
             )
             txn.execute(sql, (-last_id, -upper_bound, instance_name))
-            new_event_updates.extend((row[0], row[1:]) for row in txn)
+            # Type safety: iterating over `txn` yields `Tuple`, i.e.
+            # `Tuple[Any, ...]` of arbitrary length. Mypy detects assigning a
+            # variadic tuple to a fixed length tuple and flags it up as an error.
+            for row in txn:  # type: ignore[assignment]
+                new_event_updates.append((row[0], row[1:]))

             if len(new_event_updates) >= limit:
                 upper_bound = new_event_updates[-1][0]

@@ -1507,7 +1567,7 @@ class EventsWorkerStore(SQLBaseStore):

     async def get_all_updated_current_state_deltas(
         self, instance_name: str, from_token: int, to_token: int, target_row_count: int
-    ) -> Tuple[List[Tuple], int, bool]:
+    ) -> Tuple[List[Tuple[int, str, str, str, str]], int, bool]:
         """Fetch updates from current_state_delta_stream

         Args:

@@ -1527,7 +1587,9 @@ class EventsWorkerStore(SQLBaseStore):
             * `limited` is whether there are more updates to fetch.
         """

-        def get_all_updated_current_state_deltas_txn(txn):
+        def get_all_updated_current_state_deltas_txn(
+            txn: LoggingTransaction,
+        ) -> List[Tuple[int, str, str, str, str]]:
             sql = """
                 SELECT stream_id, room_id, type, state_key, event_id
                 FROM current_state_delta_stream

@@ -1536,21 +1598,23 @@ class EventsWorkerStore(SQLBaseStore):
                 ORDER BY stream_id ASC LIMIT ?
             """
             txn.execute(sql, (from_token, to_token, instance_name, target_row_count))
-            return txn.fetchall()
+            return cast(List[Tuple[int, str, str, str, str]], txn.fetchall())

-        def get_deltas_for_stream_id_txn(txn, stream_id):
+        def get_deltas_for_stream_id_txn(
+            txn: LoggingTransaction, stream_id: int
+        ) -> List[Tuple[int, str, str, str, str]]:
             sql = """
                 SELECT stream_id, room_id, type, state_key, event_id
                 FROM current_state_delta_stream
                 WHERE stream_id = ?
             """
             txn.execute(sql, [stream_id])
-            return txn.fetchall()
+            return cast(List[Tuple[int, str, str, str, str]], txn.fetchall())

         # we need to make sure that, for every stream id in the results, we get *all*
         # the rows with that stream id.

-        rows: List[Tuple] = await self.db_pool.runInteraction(
+        rows: List[Tuple[int, str, str, str, str]] = await self.db_pool.runInteraction(
             "get_all_updated_current_state_deltas",
             get_all_updated_current_state_deltas_txn,
         )
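As with the `fetchone` cast earlier, `cast(List[Tuple[int, str, str, str, str]], txn.fetchall())` is a type-level assertion only: driver stubs type `fetchall()` as `List[Tuple[Any, ...]]`, and the `cast` narrows that to the row shape the `SELECT` column list is known to produce. It returns its argument unchanged at runtime, so there is no per-row conversion cost.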
@@ -1579,14 +1643,14 @@ class EventsWorkerStore(SQLBaseStore):

         return rows, to_token, True

-    async def is_event_after(self, event_id1, event_id2):
+    async def is_event_after(self, event_id1: str, event_id2: str) -> bool:
         """Returns True if event_id1 is after event_id2 in the stream"""
         to_1, so_1 = await self.get_event_ordering(event_id1)
         to_2, so_2 = await self.get_event_ordering(event_id2)
         return (to_1, so_1) > (to_2, so_2)

     @cached(max_entries=5000)
-    async def get_event_ordering(self, event_id):
+    async def get_event_ordering(self, event_id: str) -> Tuple[int, int]:
         res = await self.db_pool.simple_select_one(
             table="events",
             retcols=["topological_ordering", "stream_ordering"],

@@ -1609,7 +1673,9 @@ class EventsWorkerStore(SQLBaseStore):
             None otherwise.
         """

-        def get_next_event_to_expire_txn(txn):
+        def get_next_event_to_expire_txn(
+            txn: LoggingTransaction,
+        ) -> Optional[Tuple[str, int]]:
             txn.execute(
                 """
                 SELECT event_id, expiry_ts FROM event_expiry

@@ -1617,7 +1683,7 @@ class EventsWorkerStore(SQLBaseStore):
                 """
             )

-            return txn.fetchone()
+            return cast(Optional[Tuple[str, int]], txn.fetchone())

         return await self.db_pool.runInteraction(
             desc="get_next_event_to_expire", func=get_next_event_to_expire_txn

@@ -1681,10 +1747,10 @@ class EventsWorkerStore(SQLBaseStore):
         return mapping

     @wrap_as_background_process("_cleanup_old_transaction_ids")
-    async def _cleanup_old_transaction_ids(self):
+    async def _cleanup_old_transaction_ids(self) -> None:
         """Cleans out transaction id mappings older than 24hrs."""

-        def _cleanup_old_transaction_ids_txn(txn):
+        def _cleanup_old_transaction_ids_txn(txn: LoggingTransaction) -> None:
             sql = """
                 DELETE FROM event_txn_id
                 WHERE inserted_ts < ?