#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2017 Vector Creations Ltd
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#

import heapq
import logging
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    List,
    Optional,
    Tuple,
    TypeVar,
)

import attr

from synapse.api.constants import AccountDataTypes
from synapse.replication.http.streams import ReplicationGetStreamUpdates

if TYPE_CHECKING:
    from synapse.server import HomeServer
    from synapse.storage.util.id_generators import AbstractStreamIdGenerator

logger = logging.getLogger(__name__)

# The number of rows to request from an update_function.
_STREAM_UPDATE_TARGET_ROW_COUNT = 100


# Some type aliases to make things a bit easier.

# A stream position token
Token = int

# The type of a stream update row, after JSON deserialisation, but before
# parsing with Stream.parse_row (which turns it into a `ROW_TYPE`). Normally it's
# just a row from a database query, though this is dependent on the stream in question.
#
StreamRow = TypeVar("StreamRow", bound=Tuple)

# The type returned by the update_function of a stream, as well as get_updates(),
# get_updates_since, etc.
#
# It consists of a triplet `(updates, new_last_token, limited)`, where:
#   * `updates` is a list of `(token, row)` entries.
#   * `new_last_token` is the new position in the stream.
#   * `limited` is whether there are more updates to fetch.
#
StreamUpdateResult = Tuple[List[Tuple[Token, StreamRow]], Token, bool]
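
# For illustration only (values made up): a batch of two rows with nothing
# further to fetch might look like `([(11, row_a), (12, row_b)], 12, False)`.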

# The type of an update_function for a stream
#
# The arguments are:
#
#  * instance_name: the writer of the stream
#  * from_token: the previous stream token: the starting point for fetching the
#    updates
#  * to_token: the new stream token: the point to get updates up to
#  * target_row_count: a target for the number of rows to be returned.
#
# The update_function is expected to return up to _approximately_ target_row_count rows.
# If there are more updates available, it should set `limited` in the result, and
# it will be called again to get the next batch.
#
UpdateFunction = Callable[[str, Token, Token, int], Awaitable[StreamUpdateResult]]
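
# A minimal sketch of a conforming update_function (hypothetical; `fetch_rows`
# stands in for a real database query):
#
#     async def update_function(
#         instance_name: str, from_token: Token, to_token: Token, limit: int
#     ) -> StreamUpdateResult:
#         rows = await fetch_rows(instance_name, from_token, to_token, limit + 1)
#         limited = len(rows) > limit
#         if limited:
#             rows = rows[:limit]
#             to_token = rows[-1][0]
#         return rows, to_token, limited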


class Stream:
    """Base class for the streams.

    Provides a `get_updates()` function that returns new updates since the last
    time it was called.
    """

    NAME: str  # The name of the stream
    # The type of the row. Used by the default impl of parse_row.
    ROW_TYPE: Any = None

    @classmethod
    def parse_row(cls, row: StreamRow) -> Any:
        """Parse a row received over replication

        By default, assumes that the row data is an array object and passes its contents
        to the constructor of the ROW_TYPE for this stream.

        Args:
            row: row data from the incoming RDATA command, after json decoding

        Returns:
            ROW_TYPE object for this stream
        """
        return cls.ROW_TYPE(*row)

    def __init__(
        self,
        local_instance_name: str,
        update_function: UpdateFunction,
    ):
        """Instantiate a Stream

        `update_function` is called to get updates for this stream between a
        pair of stream tokens. See the `UpdateFunction` type definition for more
        info.

        Args:
            local_instance_name: The instance name of the current process
            update_function: callback to get stream updates, as above
        """
        self.local_instance_name = local_instance_name
        self.update_function = update_function

        # The token from which we last asked for updates
        self.last_token = self.current_token(self.local_instance_name)

    def current_token(self, instance_name: str) -> Token:
        """Takes an instance name, which is a writer to the stream, and returns
        the position in the stream of that writer (as viewed from the current
        process).
        """
        # We can't make this an abstract class as it makes mypy unhappy.
        raise NotImplementedError()

    def minimal_local_current_token(self) -> Token:
        """Tries to return a minimal current token for the local instance,
        i.e. for writers this would be the last successful write.

        If the local instance is not a writer (or hasn't written yet) then this
        falls back to returning the normal "current token".
        """
        raise NotImplementedError()

    def can_discard_position(
        self, instance_name: str, prev_token: int, new_token: int
    ) -> bool:
        """Whether or not a position command for this stream can be discarded.

        Useful for streams that can never go backwards and where we already know
        the stream ID for the instance has advanced.
        """
        return False

    def discard_updates_and_advance(self) -> None:
        """Called when the stream should advance but the updates would be discarded,
        e.g. when there are no currently connected workers.
        """
        self.last_token = self.current_token(self.local_instance_name)

    async def get_updates(self) -> StreamUpdateResult:
        """Gets all updates since the last time this function was called (or
        since the stream was constructed if it hadn't been called before).

        Returns:
            A triplet `(updates, new_last_token, limited)`, where `updates` is
            a list of `(token, row)` entries, `new_last_token` is the new
            position in the stream (i.e. the highest token returned in the
            updates), and `limited` is whether there are more updates to fetch.
        """
        current_token = self.current_token(self.local_instance_name)

        # If the minimum current token for the local instance is less than or
        # equal to the last thing we published, we know that there are no
        # updates.
        if self.last_token >= self.minimal_local_current_token():
            self.last_token = current_token
            return [], current_token, False

        updates, current_token, limited = await self.get_updates_since(
            self.local_instance_name, self.last_token, current_token
        )
        self.last_token = current_token

        return updates, current_token, limited

    async def get_updates_since(
        self, instance_name: str, from_token: Token, upto_token: Token
    ) -> StreamUpdateResult:
        """Like get_updates except allows specifying from when we should
        stream updates.

        Returns:
            A triplet `(updates, new_last_token, limited)`, where `updates` is
            a list of `(token, row)` entries, `new_last_token` is the new
            position in the stream, and `limited` is whether there are more
            updates to fetch.
        """

        from_token = int(from_token)

        if from_token == upto_token:
            return [], upto_token, False

        updates, upto_token, limited = await self.update_function(
            instance_name,
            from_token,
            upto_token,
            _STREAM_UPDATE_TARGET_ROW_COUNT,
        )
        return updates, upto_token, limited


class _StreamFromIdGen(Stream):
    """Helper class for simple streams that use a stream ID generator"""

    def __init__(
        self,
        local_instance_name: str,
        update_function: UpdateFunction,
        stream_id_gen: "AbstractStreamIdGenerator",
    ):
        self._stream_id_gen = stream_id_gen
        super().__init__(local_instance_name, update_function)

    def current_token(self, instance_name: str) -> Token:
        return self._stream_id_gen.get_current_token_for_writer(instance_name)

    def minimal_local_current_token(self) -> Token:
        return self._stream_id_gen.get_minimal_local_current_token()

    def can_discard_position(
        self, instance_name: str, prev_token: int, new_token: int
    ) -> bool:
        # These streams can't go backwards, so we know we can ignore any
        # positions where the tokens are from before the current token.
        return new_token <= self.current_token(instance_name)


def current_token_without_instance(
    current_token: Callable[[], int]
) -> Callable[[str], int]:
    """Takes a current token callback function for a single writer stream
    that doesn't take an instance name parameter and wraps it in a function that
    does accept an instance name parameter but ignores it.
    """
    return lambda instance_name: current_token()
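
# Example usage (illustrative): wrapping a zero-argument token getter so that
# it satisfies the `Callable[[str], int]` interface used by streams:
#
#     get_token = current_token_without_instance(handler.get_current_token)
#     get_token("some_instance")  # the instance name is accepted but ignored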


def make_http_update_function(hs: "HomeServer", stream_name: str) -> UpdateFunction:
    """Makes a suitable function for use as an `update_function` that queries
    the master process for updates.
    """

    client = ReplicationGetStreamUpdates.make_client(hs)

    async def update_function(
        instance_name: str, from_token: int, upto_token: int, limit: int
    ) -> StreamUpdateResult:
        result = await client(
            instance_name=instance_name,
            stream_name=stream_name,
            from_token=from_token,
            upto_token=upto_token,
        )
        return result["updates"], result["upto_token"], result["limited"]

    return update_function
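
# For example (illustrative), a worker that is not the typing writer builds its
# update function as `make_http_update_function(hs, "typing")`; see TypingStream
# below.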


class BackfillStream(Stream):
    """We fetched some old events and either we had never seen those events
    before or they went from being outliers to not.
    """

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class BackfillStreamRow:
        event_id: str
        room_id: str
        type: str
        state_key: Optional[str]
        redacts: Optional[str]
        relates_to: Optional[str]

    NAME = "backfill"
    ROW_TYPE = BackfillStreamRow

    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastores().main
        super().__init__(
            hs.get_instance_name(),
            self.store.get_all_new_backfill_event_rows,
        )

    def current_token(self, instance_name: str) -> Token:
        # The backfill stream over replication operates on *positive* numbers,
        # which means we need to negate it.
        return -self.store._backfill_id_gen.get_current_token_for_writer(instance_name)

    def minimal_local_current_token(self) -> Token:
        # The backfill stream over replication operates on *positive* numbers,
        # which means we need to negate it.
        return -self.store._backfill_id_gen.get_minimal_local_current_token()

    def can_discard_position(
        self, instance_name: str, prev_token: int, new_token: int
    ) -> bool:
        # The backfill stream can't go backwards, so we know we can ignore any
        # positions where the tokens are from before the current token.
        return new_token <= self.current_token(instance_name)


class PresenceStream(_StreamFromIdGen):
    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class PresenceStreamRow:
        user_id: str
        state: str
        last_active_ts: int
        last_federation_update_ts: int
        last_user_sync_ts: int
        status_msg: str
        currently_active: bool

    NAME = "presence"
    ROW_TYPE = PresenceStreamRow

    def __init__(self, hs: "HomeServer"):
        store = hs.get_datastores().main

        if hs.get_instance_name() in hs.config.worker.writers.presence:
            # On the presence writer, query the presence handler.
            presence_handler = hs.get_presence_handler()

            from synapse.handlers.presence import PresenceHandler

            assert isinstance(presence_handler, PresenceHandler)

            update_function: UpdateFunction = presence_handler.get_all_presence_updates
        else:
            # Query the presence writer process.
            update_function = make_http_update_function(hs, self.NAME)

        super().__init__(
            hs.get_instance_name(), update_function, store._presence_id_gen
        )


class PresenceFederationStream(Stream):
    """A stream used to send ad hoc presence updates over federation.

    Streams the remote destination and the user ID of the presence state to
    send.
    """

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class PresenceFederationStreamRow:
        destination: str
        user_id: str

    NAME = "presence_federation"
    ROW_TYPE = PresenceFederationStreamRow

    def __init__(self, hs: "HomeServer"):
        self._federation_queue = hs.get_presence_handler().get_federation_queue()
        super().__init__(
            hs.get_instance_name(),
            self._federation_queue.get_replication_rows,
        )

    def current_token(self, instance_name: str) -> Token:
        return self._federation_queue.get_current_token(instance_name)

    def minimal_local_current_token(self) -> Token:
        return self._federation_queue.get_current_token(self.local_instance_name)


class TypingStream(Stream):
    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class TypingStreamRow:
        """
        An entry in the typing stream.
        Describes all the users that are 'typing' right now in one room.

        When a user stops typing, it will be streamed as a new update with that
        user absent; you can think of the `user_ids` list as overwriting the
        entire list that was there previously.
        """

        # The room that this update is for.
        room_id: str

        # All the users that are 'typing' right now in the specified room.
        user_ids: List[str]
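
        # Illustrative example (values made up): an update of
        # ("!room:hs", ["@alice:hs"]) following an earlier
        # ("!room:hs", ["@alice:hs", "@bob:hs"]) means that @bob:hs has
        # stopped typing.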

    NAME = "typing"
    ROW_TYPE = TypingStreamRow

    def __init__(self, hs: "HomeServer"):
        if hs.get_instance_name() in hs.config.worker.writers.typing:
            # On the writer, query the typing handler.
            typing_writer_handler = hs.get_typing_writer_handler()
            update_function: Callable[
                [str, int, int, int], Awaitable[Tuple[List[Tuple[int, Any]], int, bool]]
            ] = typing_writer_handler.get_all_typing_updates
            self.current_token_function = typing_writer_handler.get_current_token
        else:
            # Query the typing writer process.
            update_function = make_http_update_function(hs, self.NAME)
            self.current_token_function = hs.get_typing_handler().get_current_token

        super().__init__(
            hs.get_instance_name(),
            update_function,
        )

    def current_token(self, instance_name: str) -> Token:
        return self.current_token_function()

    def minimal_local_current_token(self) -> Token:
        return self.current_token_function()


class ReceiptsStream(_StreamFromIdGen):
    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class ReceiptsStreamRow:
        room_id: str
        receipt_type: str
        user_id: str
        event_id: str
        thread_id: Optional[str]
        data: dict

    NAME = "receipts"
    ROW_TYPE = ReceiptsStreamRow

    def __init__(self, hs: "HomeServer"):
        store = hs.get_datastores().main
        super().__init__(
            hs.get_instance_name(),
            store.get_all_updated_receipts,
            store._receipts_id_gen,
        )


class PushRulesStream(_StreamFromIdGen):
    """A user has changed their push rules"""

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class PushRulesStreamRow:
        user_id: str

    NAME = "push_rules"
    ROW_TYPE = PushRulesStreamRow

    def __init__(self, hs: "HomeServer"):
        store = hs.get_datastores().main

        super().__init__(
            hs.get_instance_name(),
            store.get_all_push_rule_updates,
            store._push_rules_stream_id_gen,
        )


class PushersStream(_StreamFromIdGen):
    """A user has added/changed/removed a pusher"""

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class PushersStreamRow:
        user_id: str
        app_id: str
        pushkey: str
        deleted: bool

    NAME = "pushers"
    ROW_TYPE = PushersStreamRow

    def __init__(self, hs: "HomeServer"):
        store = hs.get_datastores().main

        super().__init__(
            hs.get_instance_name(),
            store.get_all_updated_pushers_rows,
            store._pushers_id_gen,
        )


class CachesStream(Stream):
    """A cache was invalidated on the master and no other stream would
    invalidate the cache on the workers.
    """

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class CachesStreamRow:
        """Stream to inform workers they should invalidate their cache.

        Attributes:
            cache_func: Name of the cached function.
            keys: The entry in the cache to invalidate. If None then the whole
                cache will be invalidated.
            invalidation_ts: Timestamp of when the invalidation took place.
        """

        cache_func: str
        keys: Optional[List[Any]]
        invalidation_ts: int
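
        # Illustrative example (values made up):
        #     CachesStreamRow("get_rooms_for_user", ["@user:hs"], 1700000000000)
        # would tell workers to invalidate the "get_rooms_for_user" cache entry
        # for @user:hs.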

    NAME = "caches"
    ROW_TYPE = CachesStreamRow

    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastores().main
        super().__init__(
            hs.get_instance_name(),
            self.store.get_all_updated_caches,
        )

    def current_token(self, instance_name: str) -> Token:
        return self.store.get_cache_stream_token_for_writer(instance_name)

    def minimal_local_current_token(self) -> Token:
        if self.store._cache_id_gen:
            return self.store._cache_id_gen.get_minimal_local_current_token()
        return self.current_token(self.local_instance_name)

    def can_discard_position(
        self, instance_name: str, prev_token: int, new_token: int
    ) -> bool:
        # Caches streams can't go backwards, so we know we can ignore any
        # positions where the tokens are from before the current token.
        return new_token <= self.current_token(instance_name)


class DeviceListsStream(_StreamFromIdGen):
    """Either a user has updated their devices or a remote server needs to be
    told about a device update.
    """

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class DeviceListsStreamRow:
        entity: str
        # Indicates that a user has signed their own device with their user-signing key
        is_signature: bool

    NAME = "device_lists"
    ROW_TYPE = DeviceListsStreamRow

    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastores().main
        super().__init__(
            hs.get_instance_name(),
            self._update_function,
            self.store._device_list_id_gen,
        )

    async def _update_function(
        self,
        instance_name: str,
        from_token: Token,
        current_token: Token,
        target_row_count: int,
    ) -> StreamUpdateResult:
        (
            device_updates,
            devices_to_token,
            devices_limited,
        ) = await self.store.get_all_device_list_changes_for_remotes(
            instance_name, from_token, current_token, target_row_count
        )

        (
            signatures_updates,
            signatures_to_token,
            signatures_limited,
        ) = await self.store.get_all_user_signature_changes_for_remotes(
            instance_name, from_token, current_token, target_row_count
        )
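
        # If either query hit its row limit, clamp the upper bound to the
        # lower of the two end tokens, so that we don't advance past rows the
        # other source has not yet returned.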
        upper_limit_token = current_token
        if devices_limited:
            upper_limit_token = min(upper_limit_token, devices_to_token)
        if signatures_limited:
            upper_limit_token = min(upper_limit_token, signatures_to_token)

        device_updates = [
            (stream_id, (entity, False))
            for stream_id, (entity,) in device_updates
            if stream_id <= upper_limit_token
        ]

        signatures_updates = [
            (stream_id, (entity, True))
            for stream_id, (entity,) in signatures_updates
            if stream_id <= upper_limit_token
        ]

        updates = list(
            heapq.merge(device_updates, signatures_updates, key=lambda row: row[0])
        )

        return updates, upper_limit_token, devices_limited or signatures_limited


class ToDeviceStream(_StreamFromIdGen):
    """New to_device messages for a client"""

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class ToDeviceStreamRow:
        entity: str

    NAME = "to_device"
    ROW_TYPE = ToDeviceStreamRow

    def __init__(self, hs: "HomeServer"):
        store = hs.get_datastores().main
        super().__init__(
            hs.get_instance_name(),
            store.get_all_new_device_messages,
            store._to_device_msg_id_gen,
        )


class AccountDataStream(_StreamFromIdGen):
    """Global or per room account data was changed"""

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class AccountDataStreamRow:
        user_id: str
        room_id: Optional[str]
        data_type: str

    NAME = "account_data"
    ROW_TYPE = AccountDataStreamRow

    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastores().main
        super().__init__(
            hs.get_instance_name(),
            self._update_function,
            self.store._account_data_id_gen,
        )

    async def _update_function(
        self, instance_name: str, from_token: int, to_token: int, limit: int
    ) -> StreamUpdateResult:
        limited = False
        global_results = await self.store.get_updated_global_account_data(
            from_token, to_token, limit
        )

        # If the global results hit the limit, we'll need to limit the room
        # results to the same stream token.
        if len(global_results) >= limit:
            to_token = global_results[-1][0]
            limited = True

        room_results = await self.store.get_updated_room_account_data(
            from_token, to_token, limit
        )

        # Likewise, if the room results hit the limit, limit the global results
        # to the same stream token.
        if len(room_results) >= limit:
            to_token = room_results[-1][0]
            limited = True

        tags, tag_to_token, tags_limited = await self.store.get_all_updated_tags(
            instance_name,
            from_token,
            to_token,
            limit,
        )

        # Again, if the tag results hit the limit, limit the other results to
        # the same stream token.
        if tags_limited:
            to_token = tag_to_token
            limited = True

        # Convert the global results to the right format, and limit them to
        # the to_token at the same time.
        global_rows = (
            (stream_id, (user_id, None, account_data_type))
            for stream_id, user_id, account_data_type in global_results
            if stream_id <= to_token
        )

        room_rows = (
            (stream_id, (user_id, room_id, account_data_type))
            for stream_id, user_id, room_id, account_data_type in room_results
            if stream_id <= to_token
        )

        tag_rows = (
            (stream_id, (user_id, room_id, AccountDataTypes.TAG))
            for stream_id, user_id, room_id in tags
            if stream_id <= to_token
        )

        # We need to return a sorted list, so merge them together.
        #
        # Note: We order only by the stream ID to work around a bug where the
        # same stream ID could appear in both `global_rows` and `room_rows`,
        # leading to a comparison between the data tuples. The comparison could
        # fail due to attempting to compare the `room_id` which results in a
        # `TypeError` from comparing a `str` vs `None`.
        updates = list(
            heapq.merge(room_rows, global_rows, tag_rows, key=lambda row: row[0])
        )
        return updates, to_token, limited