#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2014-2016 OpenMarket Ltd
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#
|
2023-09-19 15:26:44 -04:00
|
|
|
from typing import (
|
|
|
|
TYPE_CHECKING,
|
|
|
|
Any,
|
|
|
|
Dict,
|
|
|
|
Iterable,
|
|
|
|
List,
|
|
|
|
Mapping,
|
|
|
|
Optional,
|
|
|
|
Tuple,
|
2023-10-06 11:41:57 -04:00
|
|
|
Union,
|
2023-09-19 15:26:44 -04:00
|
|
|
cast,
|
|
|
|
)
|
2020-06-16 12:10:28 -04:00
|
|
|
|
2021-04-23 07:21:55 -04:00
|
|
|
from synapse.api.presence import PresenceState, UserPresenceState
|
|
|
|
from synapse.replication.tcp.streams import PresenceStream
|
2019-10-21 07:56:42 -04:00
|
|
|
from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
|
2022-02-21 11:03:06 -05:00
|
|
|
from synapse.storage.database import (
|
|
|
|
DatabasePool,
|
|
|
|
LoggingDatabaseConnection,
|
|
|
|
LoggingTransaction,
|
|
|
|
)
|
2022-05-31 09:01:05 -04:00
|
|
|
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
|
2023-07-05 06:44:02 -04:00
|
|
|
from synapse.storage.engines._base import IsolationLevel
|
2021-04-23 07:21:55 -04:00
|
|
|
from synapse.storage.types import Connection
|
2022-02-21 11:03:06 -05:00
|
|
|
from synapse.storage.util.id_generators import (
|
|
|
|
AbstractStreamIdGenerator,
|
|
|
|
MultiWriterIdGenerator,
|
|
|
|
)
|
2019-10-21 07:56:42 -04:00
|
|
|
from synapse.util.caches.descriptors import cached, cachedList
|
2021-04-23 07:21:55 -04:00
|
|
|
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
2020-01-14 06:58:02 -05:00
|
|
|
from synapse.util.iterutils import batch_iter
|
2019-10-21 07:56:42 -04:00
|
|
|
|
2021-04-23 07:21:55 -04:00
|
|
|
if TYPE_CHECKING:
|
|
|
|
from synapse.server import HomeServer
|
|
|
|
|
2019-10-21 07:56:42 -04:00
|
|
|
|
2021-09-03 12:16:56 -04:00
|
|
|
class PresenceBackgroundUpdateStore(SQLBaseStore):
    """Registers presence-related background database updates.

    Currently this only registers the partial index over non-offline
    presence rows.
    """

    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ) -> None:
        super().__init__(database, db_conn, hs)

        # Partial index over rows whose state is not 'offline'.
        # Used by `PresenceStore._get_active_presence()`
        self.db_pool.updates.register_background_index_update(
            "presence_stream_not_offline_index",
            index_name="presence_stream_state_not_offline_idx",
            table="presence_stream",
            columns=["state"],
            where_clause="state != 'offline'",
        )
|
|
|
|
|
|
|
|
|
2022-05-31 09:01:05 -04:00
|
|
|
class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore):
    """Storage for the presence stream and related caches."""

    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ) -> None:
        super().__init__(database, db_conn, hs)

        self._instance_name = hs.get_instance_name()
        self._presence_id_gen: AbstractStreamIdGenerator

        # Only instances listed as presence writers may persist presence
        # updates (see the assertion in `update_presence`).
        self._can_persist_presence = (
            self._instance_name in hs.config.worker.writers.presence
        )

        # Multi-writer stream ID generator backed by the
        # `presence_stream_sequence` database sequence.
        self._presence_id_gen = MultiWriterIdGenerator(
            db_conn=db_conn,
            db=database,
            notifier=hs.get_replication_notifier(),
            stream_name="presence_stream",
            instance_name=self._instance_name,
            tables=[("presence_stream", "instance_name", "stream_id")],
            sequence_name="presence_stream_sequence",
            writers=hs.config.worker.writers.presence,
        )

        self.hs = hs
        # Non-offline presence loaded synchronously at startup; handed over
        # (and cleared) via `take_presence_startup_info`.
        self._presence_on_startup = self._get_active_presence(db_conn)

        # Prefill the stream-change cache from the current tail of the
        # presence stream. Must run after `_presence_id_gen` is set up, as
        # it reads the current token.
        presence_cache_prefill, min_presence_val = self.db_pool.get_cache_dict(
            db_conn,
            "presence_stream",
            entity_column="user_id",
            stream_column="stream_id",
            max_value=self._presence_id_gen.get_current_token(),
        )
        self.presence_stream_cache = StreamChangeCache(
            "PresenceStreamChangeCache",
            min_presence_val,
            prefilled_cache=presence_cache_prefill,
        )
|
|
|
|
|
2022-04-27 08:05:00 -04:00
|
|
|
    async def update_presence(
        self, presence_states: List[UserPresenceState]
    ) -> Tuple[int, int]:
        """Persist a batch of presence updates.

        May only be called on an instance configured as a presence writer.

        Args:
            presence_states: The new presence states to store. Assumed
                non-empty — an empty list would make the final indexing
                below raise IndexError; presumably callers guard for this.

        Returns:
            A tuple of the stream ordering assigned to the last update and
            the current maximum presence stream token.
        """
        assert self._can_persist_presence

        # Reserve one stream ordering per state up front.
        stream_ordering_manager = self._presence_id_gen.get_next_mult(
            len(presence_states)
        )

        async with stream_ordering_manager as stream_orderings:
            # Run the interaction with an isolation level of READ_COMMITTED to avoid
            # serialization errors(and rollbacks) in the database. This way it will
            # ignore new rows during the DELETE, but will pick them up the next time
            # this is run. Currently, that is between 5-60 seconds.
            await self.db_pool.runInteraction(
                "update_presence",
                self._update_presence_txn,
                stream_orderings,
                presence_states,
                isolation_level=IsolationLevel.READ_COMMITTED,
            )

        return stream_orderings[-1], self._presence_id_gen.get_current_token()
|
|
|
|
|
2022-02-21 11:03:06 -05:00
|
|
|
    def _update_presence_txn(
        self,
        txn: LoggingTransaction,
        stream_orderings: List[int],
        presence_states: List[UserPresenceState],
    ) -> None:
        """Transaction body for `update_presence`.

        Deletes superseded presence rows for the affected users and inserts
        the new rows, invalidating the relevant caches on commit.

        Args:
            txn: The database transaction.
            stream_orderings: One stream ID per state, zipped positionally
                with `presence_states`.
            presence_states: The new presence states to store.
        """
        for stream_id, state in zip(stream_orderings, presence_states):
            # Defer cache invalidation until the transaction commits.
            txn.call_after(
                self.presence_stream_cache.entity_has_changed, state.user_id, stream_id
            )
            txn.call_after(self._get_presence_for_user.invalidate, (state.user_id,))

        # Delete old rows to stop database from getting really big
        sql = "DELETE FROM presence_stream WHERE stream_id < ? AND "

        for states in batch_iter(presence_states, 50):
            clause, args = make_in_list_sql_clause(
                self.database_engine, "user_id", [s.user_id for s in states]
            )
            # NOTE(review): `stream_id` here is the loop variable left over
            # from the `zip` loop above, i.e. the final entry of
            # `stream_orderings` — presumably the highest new ordering, so
            # older rows for these users are superseded by this batch's
            # inserts. Confirm orderings are ascending before relying on it.
            txn.execute(sql + clause, [stream_id] + list(args))

        # Actually insert new rows
        self.db_pool.simple_insert_many_txn(
            txn,
            table="presence_stream",
            keys=(
                "stream_id",
                "user_id",
                "state",
                "last_active_ts",
                "last_federation_update_ts",
                "last_user_sync_ts",
                "status_msg",
                "currently_active",
                "instance_name",
            ),
            values=[
                (
                    stream_id,
                    state.user_id,
                    state.state,
                    state.last_active_ts,
                    state.last_federation_update_ts,
                    state.last_user_sync_ts,
                    state.status_msg,
                    state.currently_active,
                    self._instance_name,
                )
                for stream_id, state in zip(stream_orderings, presence_states)
            ],
        )
|
|
|
|
|
2020-06-16 12:10:28 -04:00
|
|
|
    async def get_all_presence_updates(
        self, instance_name: str, last_id: int, current_id: int, limit: int
    ) -> Tuple[List[Tuple[int, list]], int, bool]:
        """Get updates for presence replication stream.

        Args:
            instance_name: The writer we want to fetch updates from. Unused
                here since there is only ever one writer.
            last_id: The token to fetch updates from. Exclusive.
            current_id: The token to fetch updates up to. Inclusive.
            limit: The requested limit for the number of rows to return. The
                function may return more or fewer rows.

        Returns:
            A tuple consisting of: the updates, a token to use to fetch
            subsequent updates, and whether we returned fewer rows than exists
            between the requested tokens due to the limit.

            The token returned can be used in a subsequent call to this
            function to get further updates.

            The updates are a list of 2-tuples of stream ID and the row data
        """

        # Fast path: nothing has happened since the caller's token.
        if last_id == current_id:
            return [], current_id, False

        def get_all_presence_updates_txn(
            txn: LoggingTransaction,
        ) -> Tuple[List[Tuple[int, list]], int, bool]:
            sql = """
                SELECT stream_id, user_id, state, last_active_ts,
                    last_federation_update_ts, last_user_sync_ts,
                    status_msg, currently_active
                FROM presence_stream
                WHERE ? < stream_id AND stream_id <= ?
                ORDER BY stream_id ASC
                LIMIT ?
            """
            txn.execute(sql, (last_id, current_id, limit))
            # Split each row into (stream_id, rest-of-row) pairs.
            updates = cast(
                List[Tuple[int, list]],
                [(row[0], row[1:]) for row in txn],
            )

            upper_bound = current_id
            limited = False
            if len(updates) >= limit:
                # We hit the limit; report the last row's stream ID so the
                # caller can page from there.
                upper_bound = updates[-1][0]
                limited = True

            return updates, upper_bound, limited

        return await self.db_pool.runInteraction(
            "get_all_presence_updates", get_all_presence_updates_txn
        )
|
|
|
|
|
|
|
|
    @cached()
    def _get_presence_for_user(self, user_id: str) -> None:
        # Cache stub: entries are populated via the batched
        # `get_presence_for_users` (decorated with `@cachedList` below);
        # calling this method directly is unsupported.
        raise NotImplementedError()
|
|
|
|
|
|
|
|
    @cachedList(
        cached_method_name="_get_presence_for_user",
        list_name="user_ids",
        num_args=1,
    )
    async def get_presence_for_users(
        self, user_ids: Iterable[str]
    ) -> Mapping[str, UserPresenceState]:
        """Fetch the latest stored presence state for the given users.

        Args:
            user_ids: The users to look up.

        Returns:
            A map from user ID to their presence state. Users with no row in
            `presence_stream` are presumably absent from the result — TODO
            confirm `simple_select_many_batch` semantics.
        """
        # TODO All these columns are nullable, but we don't expect that:
        #      https://github.com/matrix-org/synapse/issues/16467
        rows = cast(
            List[Tuple[str, str, int, int, int, Optional[str], Union[int, bool]]],
            await self.db_pool.simple_select_many_batch(
                table="presence_stream",
                column="user_id",
                iterable=user_ids,
                keyvalues={},
                retcols=(
                    "user_id",
                    "state",
                    "last_active_ts",
                    "last_federation_update_ts",
                    "last_user_sync_ts",
                    "status_msg",
                    "currently_active",
                ),
                desc="get_presence_for_users",
            ),
        )

        return {
            user_id: UserPresenceState(
                user_id=user_id,
                state=state,
                last_active_ts=last_active_ts,
                last_federation_update_ts=last_federation_update_ts,
                last_user_sync_ts=last_user_sync_ts,
                status_msg=status_msg,
                # May be stored as an int (0/1); normalise to bool.
                currently_active=bool(currently_active),
            )
            for user_id, state, last_active_ts, last_federation_update_ts, last_user_sync_ts, status_msg, currently_active in rows
        }
|
2019-10-21 07:56:42 -04:00
|
|
|
|
2021-05-18 09:13:45 -04:00
|
|
|
async def should_user_receive_full_presence_with_token(
|
|
|
|
self,
|
|
|
|
user_id: str,
|
|
|
|
from_token: int,
|
|
|
|
) -> bool:
|
|
|
|
"""Check whether the given user should receive full presence using the stream token
|
|
|
|
they're updating from.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
user_id: The ID of the user to check.
|
|
|
|
from_token: The stream token included in their /sync token.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
True if the user should have full presence sent to them, False otherwise.
|
|
|
|
"""
|
|
|
|
|
2022-05-31 09:01:05 -04:00
|
|
|
token = await self._get_full_presence_stream_token_for_user(user_id)
|
|
|
|
if token is None:
|
|
|
|
return False
|
2021-05-18 09:13:45 -04:00
|
|
|
|
2022-05-31 09:01:05 -04:00
|
|
|
return from_token <= token
|
|
|
|
|
|
|
|
@cached()
|
|
|
|
async def _get_full_presence_stream_token_for_user(
|
|
|
|
self, user_id: str
|
|
|
|
) -> Optional[int]:
|
|
|
|
"""Get the presence token corresponding to the last full presence update
|
|
|
|
for this user.
|
|
|
|
|
|
|
|
If the user presents a sync token with a presence stream token at least
|
|
|
|
as old as the result, then we need to send them a full presence update.
|
|
|
|
|
|
|
|
If this user has never needed a full presence update, returns `None`.
|
|
|
|
"""
|
|
|
|
return await self.db_pool.simple_select_one_onecol(
|
|
|
|
table="users_to_send_full_presence_to",
|
|
|
|
keyvalues={"user_id": user_id},
|
|
|
|
retcol="presence_stream_id",
|
|
|
|
allow_none=True,
|
|
|
|
desc="_get_full_presence_stream_token_for_user",
|
2021-05-18 09:13:45 -04:00
|
|
|
)
|
|
|
|
|
2022-02-21 11:03:06 -05:00
|
|
|
    async def add_users_to_send_full_presence_to(self, user_ids: Iterable[str]) -> None:
        """Adds to the list of users who should receive a full snapshot of presence
        upon their next sync.

        Args:
            user_ids: An iterable of user IDs.
        """
        # Add user entries to the table, updating the presence_stream_id column if the user already
        # exists in the table.
        presence_stream_id = self._presence_id_gen.get_current_token()

        def _add_users_to_send_full_presence_to(txn: LoggingTransaction) -> None:
            self.db_pool.simple_upsert_many_txn(
                txn,
                table="users_to_send_full_presence_to",
                key_names=("user_id",),
                key_values=[(user_id,) for user_id in user_ids],
                value_names=("presence_stream_id",),
                # We save the current presence stream ID token along with the user ID entry so
                # that when a user /sync's, even if they syncing multiple times across separate
                # devices at different times, each device will receive full presence once - when
                # the presence stream ID in their sync token is less than the one in the table
                # for their user ID.
                value_values=[(presence_stream_id,) for _ in user_ids],
            )
            # The upsert changes the markers these cached lookups are based
            # on, so invalidate them (locally and over replication).
            self._invalidate_cache_and_stream_bulk(
                txn,
                self._get_full_presence_stream_token_for_user,
                [(user_id,) for user_id in user_ids],
            )

        return await self.db_pool.runInteraction(
            "add_users_to_send_full_presence_to", _add_users_to_send_full_presence_to
        )
|
|
|
|
|
2021-03-25 06:34:23 -04:00
|
|
|
    async def get_presence_for_all_users(
        self,
        include_offline: bool = True,
    ) -> Dict[str, UserPresenceState]:
        """Retrieve the current presence state for all users.

        Note that the presence_stream table is culled frequently, so it should only
        contain the latest presence state for each user.

        Args:
            include_offline: Whether to include offline presence states

        Returns:
            A dict of user IDs to their current UserPresenceState.
        """
        users_to_state: Dict[str, UserPresenceState] = {}

        exclude_keyvalues: Optional[Dict[str, str]] = None
        if not include_offline:
            # Exclude offline presence state
            exclude_keyvalues = {"state": "offline"}

        # This may be a very heavy database query.
        # We paginate in order to not block a database connection.
        limit = 100
        offset = 0
        while True:
            # TODO All these columns are nullable, but we don't expect that:
            #      https://github.com/matrix-org/synapse/issues/16467
            rows = cast(
                List[Tuple[str, str, int, int, int, Optional[str], Union[int, bool]]],
                await self.db_pool.runInteraction(
                    "get_presence_for_all_users",
                    self.db_pool.simple_select_list_paginate_txn,
                    "presence_stream",
                    orderby="stream_id",
                    start=offset,
                    limit=limit,
                    exclude_keyvalues=exclude_keyvalues,
                    retcols=(
                        "user_id",
                        "state",
                        "last_active_ts",
                        "last_federation_update_ts",
                        "last_user_sync_ts",
                        "status_msg",
                        "currently_active",
                    ),
                    order_direction="ASC",
                ),
            )

            for (
                user_id,
                state,
                last_active_ts,
                last_federation_update_ts,
                last_user_sync_ts,
                status_msg,
                currently_active,
            ) in rows:
                users_to_state[user_id] = UserPresenceState(
                    user_id=user_id,
                    state=state,
                    last_active_ts=last_active_ts,
                    last_federation_update_ts=last_federation_update_ts,
                    last_user_sync_ts=last_user_sync_ts,
                    status_msg=status_msg,
                    # Stored as 0/1 on some engines; normalise to bool.
                    currently_active=bool(currently_active),
                )

            # We've run out of updates to query
            if len(rows) < limit:
                break

            offset += limit

        return users_to_state
|
|
|
|
|
2022-02-21 11:03:06 -05:00
|
|
|
    def get_current_presence_token(self) -> int:
        """Return the current maximum presence stream token."""
        return self._presence_id_gen.get_current_token()
|
2021-04-23 07:21:55 -04:00
|
|
|
|
2022-02-21 11:03:06 -05:00
|
|
|
def _get_active_presence(self, db_conn: Connection) -> List[UserPresenceState]:
|
2021-04-23 07:21:55 -04:00
|
|
|
"""Fetch non-offline presence from the database so that we can register
|
|
|
|
the appropriate time outs.
|
|
|
|
"""
|
|
|
|
|
2021-09-03 12:16:56 -04:00
|
|
|
# The `presence_stream_state_not_offline_idx` index should be used for this
|
|
|
|
# query.
|
2021-04-23 07:21:55 -04:00
|
|
|
sql = (
|
|
|
|
"SELECT user_id, state, last_active_ts, last_federation_update_ts,"
|
|
|
|
" last_user_sync_ts, status_msg, currently_active FROM presence_stream"
|
|
|
|
" WHERE state != ?"
|
|
|
|
)
|
|
|
|
|
|
|
|
txn = db_conn.cursor()
|
|
|
|
txn.execute(sql, (PresenceState.OFFLINE,))
|
2023-10-05 11:07:38 -04:00
|
|
|
rows = txn.fetchall()
|
2021-04-23 07:21:55 -04:00
|
|
|
txn.close()
|
|
|
|
|
2023-10-05 11:07:38 -04:00
|
|
|
return [
|
|
|
|
UserPresenceState(
|
|
|
|
user_id=user_id,
|
|
|
|
state=state,
|
|
|
|
last_active_ts=last_active_ts,
|
|
|
|
last_federation_update_ts=last_federation_update_ts,
|
|
|
|
last_user_sync_ts=last_user_sync_ts,
|
|
|
|
status_msg=status_msg,
|
|
|
|
currently_active=bool(currently_active),
|
|
|
|
)
|
|
|
|
for user_id, state, last_active_ts, last_federation_update_ts, last_user_sync_ts, status_msg, currently_active in rows
|
|
|
|
]
|
2021-04-23 07:21:55 -04:00
|
|
|
|
2022-02-21 11:03:06 -05:00
|
|
|
def take_presence_startup_info(self) -> List[UserPresenceState]:
|
2021-04-23 07:21:55 -04:00
|
|
|
active_on_startup = self._presence_on_startup
|
2022-02-21 11:03:06 -05:00
|
|
|
self._presence_on_startup = []
|
2021-04-23 07:21:55 -04:00
|
|
|
return active_on_startup
|
|
|
|
|
2022-07-18 09:28:14 -04:00
|
|
|
def process_replication_rows(
|
2022-04-27 08:05:00 -04:00
|
|
|
self,
|
|
|
|
stream_name: str,
|
|
|
|
instance_name: str,
|
|
|
|
token: int,
|
|
|
|
rows: Iterable[Any],
|
|
|
|
) -> None:
|
2021-04-23 07:21:55 -04:00
|
|
|
if stream_name == PresenceStream.NAME:
|
|
|
|
for row in rows:
|
|
|
|
self.presence_stream_cache.entity_has_changed(row.user_id, token)
|
|
|
|
self._get_presence_for_user.invalidate((row.user_id,))
|
2022-07-18 09:28:14 -04:00
|
|
|
return super().process_replication_rows(stream_name, instance_name, token, rows)
|
2023-01-04 06:49:26 -05:00
|
|
|
|
|
|
|
def process_replication_position(
|
|
|
|
self, stream_name: str, instance_name: str, token: int
|
|
|
|
) -> None:
|
|
|
|
if stream_name == PresenceStream.NAME:
|
|
|
|
self._presence_id_gen.advance(instance_name, token)
|
|
|
|
super().process_replication_position(stream_name, instance_name, token)
|