# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This module is responsible for keeping track of presence status of local
and remote users.

The methods that define policy are:
    - PresenceHandler._update_states
    - PresenceHandler._handle_timeouts
    - should_notify
"""

import abc
import contextlib
import logging
from contextlib import contextmanager
from typing import (
    TYPE_CHECKING,
    Dict,
    FrozenSet,
    Iterable,
    List,
    Optional,
    Set,
    Tuple,
    Union,
)

from prometheus_client import Counter
from typing_extensions import ContextManager

import synapse.metrics
from synapse.api.constants import EventTypes, Membership, PresenceState
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
from synapse.events.presence_router import PresenceRouter
from synapse.logging.context import run_in_background
from synapse.logging.utils import log_function
from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.presence import (
    ReplicationBumpPresenceActiveTime,
    ReplicationPresenceSetState,
)
from synapse.replication.tcp.commands import ClearUserSyncsCommand
from synapse.state import StateHandler
from synapse.storage.databases.main import DataStore
from synapse.types import Collection, JsonDict, UserID, get_domain_from_id
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.descriptors import _CacheContext, cached
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

notified_presence_counter = Counter("synapse_handler_presence_notified_presence", "")
federation_presence_out_counter = Counter(
    "synapse_handler_presence_federation_presence_out", ""
)
presence_updates_counter = Counter("synapse_handler_presence_presence_updates", "")
timers_fired_counter = Counter("synapse_handler_presence_timers_fired", "")
federation_presence_counter = Counter(
    "synapse_handler_presence_federation_presence", ""
)
bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time", "")

get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"])

notify_reason_counter = Counter(
    "synapse_handler_presence_notify_reason", "", ["reason"]
)
state_transition_counter = Counter(
    "synapse_handler_presence_state_transition", "", ["from", "to"]
)


# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
# "currently_active"
LAST_ACTIVE_GRANULARITY = 60 * 1000

# How long to wait until a new /events or /sync request before assuming
# the client has gone.
SYNC_ONLINE_TIMEOUT = 30 * 1000

# How long to wait before marking the user as idle. Compared against last active.
IDLE_TIMER = 5 * 60 * 1000

# How often we expect remote servers to resend us presence.
FEDERATION_TIMEOUT = 30 * 60 * 1000

# How often to resend presence to remote servers.
FEDERATION_PING_INTERVAL = 25 * 60 * 1000

# How long we will wait before assuming that the syncs from an external process
# are dead.
EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000

# Delay before a worker tells the presence handler that a user has stopped
# syncing.
UPDATE_SYNCING_USERS_MS = 10 * 1000

assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER


class BasePresenceHandler(abc.ABC):
    """Parts of the PresenceHandler that are shared between workers and master"""

    def __init__(self, hs: "HomeServer"):
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()

        self._busy_presence_enabled = hs.config.experimental.msc3026_enabled

        active_presence = self.store.take_presence_startup_info()
        self.user_to_current_state = {state.user_id: state for state in active_presence}

    @abc.abstractmethod
    async def user_syncing(
        self, user_id: str, affect_presence: bool
    ) -> ContextManager[None]:
        """Returns a context manager that should surround any stream requests
        from the user.

        This allows us to keep track of who is currently streaming and who isn't
        without having to have timers outside of this module to avoid flickering
        when users disconnect/reconnect.

        Args:
            user_id: the user that is starting a sync
            affect_presence: If false this function will be a no-op.
                Useful for streams that are not associated with an actual
                client that is being used by a user.
        """
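
    # A minimal usage sketch for the context manager above (hypothetical caller
    # code, not part of this module; `presence_handler` and `user_id` are
    # assumed to be in scope):
    #
    #     context = await presence_handler.user_syncing(user_id, affect_presence=True)
    #     with context:
    #         ...  # long-poll for events on behalf of the user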

    @abc.abstractmethod
    def get_currently_syncing_users_for_replication(self) -> Iterable[str]:
        """Get an iterable of syncing users on this worker, to send to the presence handler

        This is called when a replication connection is established. It should return
        a list of user ids, which are then sent as USER_SYNC commands to inform the
        process handling presence about those users.

        Returns:
            An iterable of user_id strings.
        """

    async def get_state(self, target_user: UserID) -> UserPresenceState:
        results = await self.get_states([target_user.to_string()])
        return results[0]

    async def get_states(
        self, target_user_ids: Iterable[str]
    ) -> List[UserPresenceState]:
        """Get the presence state for users."""

        updates_d = await self.current_state_for_users(target_user_ids)
        updates = list(updates_d.values())

        for user_id in set(target_user_ids) - {u.user_id for u in updates}:
            updates.append(UserPresenceState.default(user_id))

        return updates

    async def current_state_for_users(
        self, user_ids: Iterable[str]
    ) -> Dict[str, UserPresenceState]:
        """Get the current presence state for multiple users.

        Returns:
            dict: `user_id` -> `UserPresenceState`
        """
        states = {
            user_id: self.user_to_current_state.get(user_id, None)
            for user_id in user_ids
        }

        missing = [user_id for user_id, state in states.items() if not state]
        if missing:
            # There are things not in our in-memory cache. Let's pull them out of
            # the database.
            res = await self.store.get_presence_for_users(missing)
            states.update(res)

            missing = [user_id for user_id, state in states.items() if not state]
            if missing:
                new = {
                    user_id: UserPresenceState.default(user_id) for user_id in missing
                }
                states.update(new)
                self.user_to_current_state.update(new)

        return states

    @abc.abstractmethod
    async def set_state(
        self, target_user: UserID, state: JsonDict, ignore_status_msg: bool = False
    ) -> None:
        """Set the presence state of the user."""

    @abc.abstractmethod
    async def bump_presence_active_time(self, user: UserID):
        """We've seen the user do something that indicates they're interacting
        with the app.
        """

    async def update_external_syncs_row(
        self, process_id, user_id, is_syncing, sync_time_msec
    ):
        """Update the syncing users for an external process as a delta.

        This is a no-op when presence is handled by a different worker.

        Args:
            process_id (str): An identifier for the process the users are
                syncing against. This allows synapse to process updates
                as users start and stop syncing against a given process.
            user_id (str): The user who has started or stopped syncing
            is_syncing (bool): Whether or not the user is now syncing
            sync_time_msec (int): Time in ms when the user was last syncing
        """
        pass

    async def update_external_syncs_clear(self, process_id):
        """Marks all users that had been marked as syncing by a given process
        as offline.

        Used when the process has stopped/disappeared.

        This is a no-op when presence is handled by a different worker.
        """
        pass

    async def process_replication_rows(self, token, rows):
        """Process presence stream rows received over replication."""
        pass


class _NullContextManager(ContextManager[None]):
    """A context manager which does nothing."""

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass


class WorkerPresenceHandler(BasePresenceHandler):
    def __init__(self, hs):
        super().__init__(hs)
        self.hs = hs
        self.is_mine_id = hs.is_mine_id

        self.presence_router = hs.get_presence_router()
        self._presence_enabled = hs.config.use_presence

        # The number of ongoing syncs on this process, by user id.
        # Empty if _presence_enabled is false.
        self._user_to_num_current_syncs = {}  # type: Dict[str, int]

        self.notifier = hs.get_notifier()
        self.instance_id = hs.get_instance_id()

        # user_id -> last_sync_ms. Lists the users that have stopped syncing
        # but we haven't notified the master of that yet
        self.users_going_offline = {}

        self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs)
        self._set_state_client = ReplicationPresenceSetState.make_client(hs)

        self._send_stop_syncing_loop = self.clock.looping_call(
            self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
        )

        self._busy_presence_enabled = hs.config.experimental.msc3026_enabled

        hs.get_reactor().addSystemEventTrigger(
            "before",
            "shutdown",
            run_as_background_process,
            "generic_presence.on_shutdown",
            self._on_shutdown,
        )

    def _on_shutdown(self):
        if self._presence_enabled:
            self.hs.get_tcp_replication().send_command(
                ClearUserSyncsCommand(self.instance_id)
            )

    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
        if self._presence_enabled:
            self.hs.get_tcp_replication().send_user_sync(
                self.instance_id, user_id, is_syncing, last_sync_ms
            )

    def mark_as_coming_online(self, user_id):
        """A user has started syncing. Send a UserSync to the master, unless they
        had recently stopped syncing.

        Args:
            user_id (str)
        """
        going_offline = self.users_going_offline.pop(user_id, None)
        if not going_offline:
            # Safe to skip because we haven't yet told the master they were offline
            self.send_user_sync(user_id, True, self.clock.time_msec())

    def mark_as_going_offline(self, user_id):
        """A user has stopped syncing. We wait before notifying the master as
        it's likely they'll come back soon. This allows us to avoid sending
        a stopped-syncing notification immediately followed by a started-syncing
        notification to the master.

        Args:
            user_id (str)
        """
        self.users_going_offline[user_id] = self.clock.time_msec()

    def send_stop_syncing(self):
        """Check if there are any users who stopped syncing a while ago
        and haven't come back yet. If there are, poke the master about them.
        """
        now = self.clock.time_msec()
        for user_id, last_sync_ms in list(self.users_going_offline.items()):
            if now - last_sync_ms > UPDATE_SYNCING_USERS_MS:
                self.users_going_offline.pop(user_id, None)
                self.send_user_sync(user_id, False, last_sync_ms)
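
    # Worked example of the debounce above (illustrative, not from the original):
    # a user whose last sync ends at t=0s and who reconnects at t=3s is simply
    # popped from `users_going_offline` by mark_as_coming_online, so the master
    # never sees a stop/start pair. Only if they stay away longer than
    # UPDATE_SYNCING_USERS_MS (10s) does send_stop_syncing report them offline.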

    async def user_syncing(
        self, user_id: str, affect_presence: bool
    ) -> ContextManager[None]:
        """Record that a user is syncing.

        Called by the sync and events servlets to record that a user has connected to
        this worker and is waiting for some events.
        """
        if not affect_presence or not self._presence_enabled:
            return _NullContextManager()

        curr_sync = self._user_to_num_current_syncs.get(user_id, 0)
        self._user_to_num_current_syncs[user_id] = curr_sync + 1

        # If we went from no in-flight syncs to some, notify replication
        if self._user_to_num_current_syncs[user_id] == 1:
            self.mark_as_coming_online(user_id)

        def _end():
            # We check that the user_id is in user_to_num_current_syncs because
            # user_to_num_current_syncs may have been cleared if we are
            # shutting down.
            if user_id in self._user_to_num_current_syncs:
                self._user_to_num_current_syncs[user_id] -= 1

                # If we went from one in-flight sync to none, notify replication
                if self._user_to_num_current_syncs[user_id] == 0:
                    self.mark_as_going_offline(user_id)

        @contextlib.contextmanager
        def _user_syncing():
            try:
                yield
            finally:
                _end()

        return _user_syncing()

    async def notify_from_replication(self, states, stream_id):
        parties = await get_interested_parties(self.store, self.presence_router, states)
        room_ids_to_states, users_to_states = parties

        self.notifier.on_new_event(
            "presence_key",
            stream_id,
            rooms=room_ids_to_states.keys(),
            users=users_to_states.keys(),
        )

    async def process_replication_rows(self, token, rows):
        states = [
            UserPresenceState(
                row.user_id,
                row.state,
                row.last_active_ts,
                row.last_federation_update_ts,
                row.last_user_sync_ts,
                row.status_msg,
                row.currently_active,
            )
            for row in rows
        ]

        for state in states:
            self.user_to_current_state[state.user_id] = state

        stream_id = token
        await self.notify_from_replication(states, stream_id)

    def get_currently_syncing_users_for_replication(self) -> Iterable[str]:
        return [
            user_id
            for user_id, count in self._user_to_num_current_syncs.items()
            if count > 0
        ]

    async def set_state(self, target_user, state, ignore_status_msg=False):
        """Set the presence state of the user."""
        presence = state["presence"]

        valid_presence = (
            PresenceState.ONLINE,
            PresenceState.UNAVAILABLE,
            PresenceState.OFFLINE,
            PresenceState.BUSY,
        )

        if presence not in valid_presence or (
            presence == PresenceState.BUSY and not self._busy_presence_enabled
        ):
            raise SynapseError(400, "Invalid presence state")

        user_id = target_user.to_string()

        # If presence is disabled, no-op
        if not self.hs.config.use_presence:
            return

        # Proxy request to master
        await self._set_state_client(
            user_id=user_id, state=state, ignore_status_msg=ignore_status_msg
        )

    async def bump_presence_active_time(self, user):
        """We've seen the user do something that indicates they're interacting
        with the app.
        """
        # If presence is disabled, no-op
        if not self.hs.config.use_presence:
            return

        # Proxy request to master
        user_id = user.to_string()
        await self._bump_active_client(user_id=user_id)


class PresenceHandler(BasePresenceHandler):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.hs = hs
        self.is_mine_id = hs.is_mine_id
        self.server_name = hs.hostname
        self.wheel_timer = WheelTimer()
        self.notifier = hs.get_notifier()
        self.federation = hs.get_federation_sender()
        self.state = hs.get_state_handler()
        self.presence_router = hs.get_presence_router()
        self._presence_enabled = hs.config.use_presence

        federation_registry = hs.get_federation_registry()

        federation_registry.register_edu_handler("m.presence", self.incoming_presence)

        LaterGauge(
            "synapse_handlers_presence_user_to_current_state_size",
            "",
            [],
            lambda: len(self.user_to_current_state),
        )

        now = self.clock.time_msec()
        for state in self.user_to_current_state.values():
            self.wheel_timer.insert(
                now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER
            )
            self.wheel_timer.insert(
                now=now,
                obj=state.user_id,
                then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
            )
            if self.is_mine_id(state.user_id):
                self.wheel_timer.insert(
                    now=now,
                    obj=state.user_id,
                    then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL,
                )
            else:
                self.wheel_timer.insert(
                    now=now,
                    obj=state.user_id,
                    then=state.last_federation_update_ts + FEDERATION_TIMEOUT,
                )
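
        # Sketch of the WheelTimer contract relied on above and in
        # _handle_timeouts (illustrative only; timeouts fire with bucket
        # granularity, so they may be returned slightly late):
        #
        #     timer = WheelTimer()
        #     timer.insert(now=0, obj="@alice:example.org", then=5000)
        #     # some time later...
        #     expired = timer.fetch(now=15000)  # -> ["@alice:example.org"]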

        # Set of users who have presence in the `user_to_current_state` that
        # have not yet been persisted
        self.unpersisted_users_changes = set()  # type: Set[str]

        hs.get_reactor().addSystemEventTrigger(
            "before",
            "shutdown",
            run_as_background_process,
            "presence.on_shutdown",
            self._on_shutdown,
        )

        self._next_serial = 1

        # Keeps track of the number of *ongoing* syncs on this process. While
        # this is non-zero a user will never go offline.
        self.user_to_num_current_syncs = {}  # type: Dict[str, int]

        # Keeps track of the number of *ongoing* syncs on other processes.
        # While any sync is ongoing on another process the user will never
        # go offline.
        # Each process has a unique identifier and an update frequency. If
        # no update is received from that process within the update period then
        # we assume that all the sync requests on that process have stopped.
        # Stored as a dict from process_id to set of user_id, and a dict of
        # process_id to millisecond timestamp last updated.
        self.external_process_to_current_syncs = {}  # type: Dict[int, Set[str]]
        self.external_process_last_updated_ms = {}  # type: Dict[int, int]

        self.external_sync_linearizer = Linearizer(name="external_sync_linearizer")

        if self._presence_enabled:
            # Start a LoopingCall in 30s that fires every 5s.
            # The initial delay is to allow disconnected clients a chance to
            # reconnect before we treat them as offline.
            def run_timeout_handler():
                return run_as_background_process(
                    "handle_presence_timeouts", self._handle_timeouts
                )

            self.clock.call_later(
                30, self.clock.looping_call, run_timeout_handler, 5000
            )

            def run_persister():
                return run_as_background_process(
                    "persist_presence_changes", self._persist_unpersisted_changes
                )

            self.clock.call_later(60, self.clock.looping_call, run_persister, 60 * 1000)

        LaterGauge(
            "synapse_handlers_presence_wheel_timer_size",
            "",
            [],
            lambda: len(self.wheel_timer),
        )

        # Used to handle sending of presence to newly joined users/servers
        if self._presence_enabled:
            self.notifier.add_replication_callback(self.notify_new_event)

        # Presence is best effort and quickly heals itself, so let's just always
        # stream from the current state when we restart.
        self._event_pos = self.store.get_current_events_token()
        self._event_processing = False

    async def _on_shutdown(self):
        """Gets called when shutting down. This lets us persist any updates that
        we haven't yet persisted, e.g. updates that only change some internal
        timers. This allows changes to persist across restarts without having to
        persist every single change.

        If this does not run it simply means that some of the timers will fire
        earlier than they should when synapse is restarted. The effect of this
        is some spurious presence changes that will self-correct.
        """
        # If the DB pool has already terminated, don't try updating
        if not self.store.db_pool.is_running():
            return

        logger.info(
            "Performing _on_shutdown. Persisting %d unpersisted changes",
            len(self.user_to_current_state),
        )

        if self.unpersisted_users_changes:
            await self.store.update_presence(
                [
                    self.user_to_current_state[user_id]
                    for user_id in self.unpersisted_users_changes
                ]
            )
        logger.info("Finished _on_shutdown")

    async def _persist_unpersisted_changes(self):
        """We periodically persist the unpersisted changes, as otherwise they
        may stack up and slow down shutdown times.
        """
        unpersisted = self.unpersisted_users_changes
        self.unpersisted_users_changes = set()

        if unpersisted:
            logger.info("Persisting %d unpersisted presence updates", len(unpersisted))
            await self.store.update_presence(
                [self.user_to_current_state[user_id] for user_id in unpersisted]
            )

    async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None:
        """Updates presence of users. Sets the appropriate timeouts. Pokes
        the notifier and federation if and only if the changed presence state
        should be sent to clients/servers.

        Args:
            new_states: The new user presence state updates to process.
        """
        now = self.clock.time_msec()

        with Measure(self.clock, "presence_update_states"):

            # NOTE: We purposefully don't await between now and when we've
            # calculated what we want to do with the new states, to avoid races.

            to_notify = {}  # Changes we want to notify everyone about
            to_federation_ping = {}  # These need sending keep-alives

            # Only bother handling the last presence change for each user
            new_states_dict = {}
            for new_state in new_states:
                new_states_dict[new_state.user_id] = new_state
            new_states = new_states_dict.values()

            for new_state in new_states:
                user_id = new_state.user_id

                # It's fine to not hit the database here, as the only thing not in
                # the current state cache are OFFLINE states, where the only field
                # of interest is last_active which is safe enough to assume is 0
                # here.
                prev_state = self.user_to_current_state.get(
                    user_id, UserPresenceState.default(user_id)
                )

                new_state, should_notify, should_ping = handle_update(
                    prev_state,
                    new_state,
                    is_mine=self.is_mine_id(user_id),
                    wheel_timer=self.wheel_timer,
                    now=now,
                )

                self.user_to_current_state[user_id] = new_state

                if should_notify:
                    to_notify[user_id] = new_state
                elif should_ping:
                    to_federation_ping[user_id] = new_state

            # TODO: We should probably ensure there are no races hereafter

            presence_updates_counter.inc(len(new_states))

            if to_notify:
                notified_presence_counter.inc(len(to_notify))
                await self._persist_and_notify(list(to_notify.values()))

            self.unpersisted_users_changes |= {s.user_id for s in new_states}
            self.unpersisted_users_changes -= set(to_notify.keys())

            to_federation_ping = {
                user_id: state
                for user_id, state in to_federation_ping.items()
                if user_id not in to_notify
            }
            if to_federation_ping:
                federation_presence_out_counter.inc(len(to_federation_ping))

                self._push_to_remotes(to_federation_ping.values())

    async def _handle_timeouts(self):
        """Checks the presence of users that have timed out and updates as
        appropriate.
        """
        logger.debug("Handling presence timeouts")
        now = self.clock.time_msec()

        # Fetch the list of users that *may* have timed out. Things may have
        # changed since the timeout was set, so we won't necessarily have to
        # take any action.
        users_to_check = set(self.wheel_timer.fetch(now))

        # Check whether the lists of syncing processes from an external
        # process have expired.
        expired_process_ids = [
            process_id
            for process_id, last_update in self.external_process_last_updated_ms.items()
            if now - last_update > EXTERNAL_PROCESS_EXPIRY
        ]
        for process_id in expired_process_ids:
            # For each expired process drop tracking info and check the users
            # that were syncing on that process to see if they need to be timed
            # out.
            users_to_check.update(
                self.external_process_to_current_syncs.pop(process_id, ())
            )
            self.external_process_last_updated_ms.pop(process_id)

        states = [
            self.user_to_current_state.get(user_id, UserPresenceState.default(user_id))
            for user_id in users_to_check
        ]

        timers_fired_counter.inc(len(states))

        syncing_user_ids = {
            user_id
            for user_id, count in self.user_to_num_current_syncs.items()
            if count
        }
        for user_ids in self.external_process_to_current_syncs.values():
            syncing_user_ids.update(user_ids)

        changes = handle_timeouts(
            states,
            is_mine_fn=self.is_mine_id,
            syncing_user_ids=syncing_user_ids,
            now=now,
        )

        return await self._update_states(changes)

    async def bump_presence_active_time(self, user):
        """We've seen the user do something that indicates they're interacting
        with the app.
        """
        # If presence is disabled, no-op
        if not self.hs.config.use_presence:
            return

        user_id = user.to_string()

        bump_active_time_counter.inc()

        prev_state = await self.current_state_for_user(user_id)

        new_fields = {"last_active_ts": self.clock.time_msec()}
        if prev_state.state == PresenceState.UNAVAILABLE:
            new_fields["state"] = PresenceState.ONLINE

        await self._update_states([prev_state.copy_and_replace(**new_fields)])

    async def user_syncing(
        self, user_id: str, affect_presence: bool = True
    ) -> ContextManager[None]:
        """Returns a context manager that should surround any stream requests
        from the user.

        This allows us to keep track of who is currently streaming and who isn't
        without having to have timers outside of this module to avoid flickering
        when users disconnect/reconnect.

        Args:
            user_id (str)
            affect_presence (bool): If false this function will be a no-op.
                Useful for streams that are not associated with an actual
                client that is being used by a user.
        """
        # Override if it should affect the user's presence, if presence is
        # disabled.
        if not self.hs.config.use_presence:
            affect_presence = False

        if affect_presence:
            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
            self.user_to_num_current_syncs[user_id] = curr_sync + 1

            prev_state = await self.current_state_for_user(user_id)
            if prev_state.state == PresenceState.OFFLINE:
                # If they're currently offline then bring them online, otherwise
                # just update the last sync times.
                await self._update_states(
                    [
                        prev_state.copy_and_replace(
                            state=PresenceState.ONLINE,
                            last_active_ts=self.clock.time_msec(),
                            last_user_sync_ts=self.clock.time_msec(),
                        )
                    ]
                )
            else:
                await self._update_states(
                    [
                        prev_state.copy_and_replace(
                            last_user_sync_ts=self.clock.time_msec()
                        )
                    ]
                )

        async def _end():
            try:
                self.user_to_num_current_syncs[user_id] -= 1

                prev_state = await self.current_state_for_user(user_id)
                await self._update_states(
                    [
                        prev_state.copy_and_replace(
                            last_user_sync_ts=self.clock.time_msec()
                        )
                    ]
                )
            except Exception:
                logger.exception("Error updating presence after sync")

        @contextmanager
        def _user_syncing():
            try:
                yield
            finally:
                if affect_presence:
                    run_in_background(_end)

        return _user_syncing()

    def get_currently_syncing_users_for_replication(self) -> Iterable[str]:
        # since we are the process handling presence, there is nothing to do here.
        return []

    async def update_external_syncs_row(
        self, process_id, user_id, is_syncing, sync_time_msec
    ):
        """Update the syncing users for an external process as a delta.

        Args:
            process_id (str): An identifier for the process the users are
                syncing against. This allows synapse to process updates
                as users start and stop syncing against a given process.
            user_id (str): The user who has started or stopped syncing
            is_syncing (bool): Whether or not the user is now syncing
            sync_time_msec (int): Time in ms when the user was last syncing
        """
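        # Hypothetical caller sketch (names and values are illustrative, not
        # taken from the original source):
        #
        #     await presence_handler.update_external_syncs_row(
        #         process_id="sync-worker-1",
        #         user_id="@alice:example.org",
        #         is_syncing=True,
        #         sync_time_msec=clock.time_msec(),
        #     )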
        with (await self.external_sync_linearizer.queue(process_id)):
            prev_state = await self.current_state_for_user(user_id)

            process_presence = self.external_process_to_current_syncs.setdefault(
                process_id, set()
            )

            updates = []
            if is_syncing and user_id not in process_presence:
                if prev_state.state == PresenceState.OFFLINE:
                    updates.append(
                        prev_state.copy_and_replace(
                            state=PresenceState.ONLINE,
                            last_active_ts=sync_time_msec,
                            last_user_sync_ts=sync_time_msec,
                        )
                    )
                else:
                    updates.append(
                        prev_state.copy_and_replace(last_user_sync_ts=sync_time_msec)
                    )
                process_presence.add(user_id)
            elif user_id in process_presence:
                updates.append(
                    prev_state.copy_and_replace(last_user_sync_ts=sync_time_msec)
                )

            if not is_syncing:
                process_presence.discard(user_id)

            if updates:
                await self._update_states(updates)

            self.external_process_last_updated_ms[process_id] = self.clock.time_msec()

    async def update_external_syncs_clear(self, process_id):
        """Marks all users that had been marked as syncing by a given process
        as offline.

        Used when the process has stopped/disappeared.
        """
        with (await self.external_sync_linearizer.queue(process_id)):
            process_presence = self.external_process_to_current_syncs.pop(
                process_id, set()
            )
            prev_states = await self.current_state_for_users(process_presence)
            time_now_ms = self.clock.time_msec()

            await self._update_states(
                [
                    prev_state.copy_and_replace(last_user_sync_ts=time_now_ms)
                    for prev_state in prev_states.values()
                ]
            )
            self.external_process_last_updated_ms.pop(process_id, None)

    async def current_state_for_user(self, user_id):
        """Get the current presence state for a user."""
        res = await self.current_state_for_users([user_id])
        return res[user_id]

    async def _persist_and_notify(self, states):
        """Persist states in the database, poke the notifier and send to
        interested remote servers
        """
        stream_id, max_token = await self.store.update_presence(states)

        parties = await get_interested_parties(self.store, self.presence_router, states)
        room_ids_to_states, users_to_states = parties

        self.notifier.on_new_event(
            "presence_key",
            stream_id,
            rooms=room_ids_to_states.keys(),
            users=[UserID.from_string(u) for u in users_to_states],
        )

        self._push_to_remotes(states)

    def _push_to_remotes(self, states):
        """Sends state updates to remote servers.

        Args:
            states (list(UserPresenceState))
        """
        self.federation.send_presence(states)

    async def incoming_presence(self, origin, content):
        """Called when we receive a `m.presence` EDU from a remote server."""
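        # Rough shape of the EDU content consumed below (illustrative example,
        # not taken from the original source):
        #
        #     {"push": [{"user_id": "@alice:remote.example",
        #                "presence": "online",
        #                "last_active_ago": 5000,
        #                "status_msg": "away from desk",
        #                "currently_active": True}]}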
        if not self._presence_enabled:
            return

        now = self.clock.time_msec()
        updates = []
        for push in content.get("push", []):
            # A "push" contains a list of presence that we are probably interested
            # in.
            user_id = push.get("user_id", None)
            if not user_id:
                logger.info(
                    "Got presence update from %r with no 'user_id': %r", origin, push
                )
                continue

            if get_domain_from_id(user_id) != origin:
                logger.info(
                    "Got presence update from %r with bad 'user_id': %r",
                    origin,
                    user_id,
                )
                continue

            presence_state = push.get("presence", None)
            if not presence_state:
                logger.info(
                    "Got presence update from %r with no 'presence_state': %r",
                    origin,
                    push,
                )
                continue

            new_fields = {"state": presence_state, "last_federation_update_ts": now}

            last_active_ago = push.get("last_active_ago", None)
            if last_active_ago is not None:
                new_fields["last_active_ts"] = now - last_active_ago

            new_fields["status_msg"] = push.get("status_msg", None)
            new_fields["currently_active"] = push.get("currently_active", False)

            prev_state = await self.current_state_for_user(user_id)
            updates.append(prev_state.copy_and_replace(**new_fields))

        if updates:
            federation_presence_counter.inc(len(updates))
            await self._update_states(updates)

    async def set_state(self, target_user, state, ignore_status_msg=False):
        """Set the presence state of the user."""
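        # `state` is the client-supplied presence content; a minimal example of
        # the expected shape (illustrative values):
        #
        #     {"presence": "online", "status_msg": "Working"}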
        status_msg = state.get("status_msg", None)
        presence = state["presence"]

        valid_presence = (
            PresenceState.ONLINE,
            PresenceState.UNAVAILABLE,
            PresenceState.OFFLINE,
            PresenceState.BUSY,
        )

        if presence not in valid_presence or (
            presence == PresenceState.BUSY and not self._busy_presence_enabled
        ):
            raise SynapseError(400, "Invalid presence state")

        user_id = target_user.to_string()

        prev_state = await self.current_state_for_user(user_id)

        new_fields = {"state": presence}

        if not ignore_status_msg:
            msg = status_msg if presence != PresenceState.OFFLINE else None
            new_fields["status_msg"] = msg

        if presence == PresenceState.ONLINE or (
            presence == PresenceState.BUSY and self._busy_presence_enabled
        ):
            new_fields["last_active_ts"] = self.clock.time_msec()

        await self._update_states([prev_state.copy_and_replace(**new_fields)])

    async def is_visible(self, observed_user, observer_user):
        """Returns whether a user can see another user's presence."""
        observer_room_ids = await self.store.get_rooms_for_user(
            observer_user.to_string()
        )
        observed_room_ids = await self.store.get_rooms_for_user(
            observed_user.to_string()
        )

        if observer_room_ids & observed_room_ids:
            return True

        return False

    async def get_all_presence_updates(
        self, instance_name: str, last_id: int, current_id: int, limit: int
    ) -> Tuple[List[Tuple[int, list]], int, bool]:
        """
        Gets a list of presence update rows from between the given stream ids.
        Each row has:
            - stream_id (int)
            - user_id (str)
            - state (str)
            - last_active_ts (int)
            - last_federation_update_ts (int)
            - last_user_sync_ts (int)
            - status_msg (str)
            - currently_active (bool)

        Args:
            instance_name: The writer we want to fetch updates from. Unused
                here since there is only ever one writer.
            last_id: The token to fetch updates from. Exclusive.
            current_id: The token to fetch updates up to. Inclusive.
            limit: The requested limit for the number of rows to return. The
                function may return more or fewer rows.

        Returns:
            A tuple consisting of: the updates, a token to use to fetch
            subsequent updates, and whether we returned fewer rows than exist
            between the requested tokens due to the limit.

            The token returned can be used in a subsequent call to this
            function to get further updates.

            The updates are a list of 2-tuples of stream ID and the row data.
        """

        # TODO(markjh): replicate the unpersisted changes.
        # This could use the in-memory stores for recent changes.
        rows = await self.store.get_all_presence_updates(
            instance_name, last_id, current_id, limit
        )
        return rows

    def notify_new_event(self):
        """Called when new events have happened. Handles users and servers
        joining rooms that require being sent presence.
        """

        if self._event_processing:
            return

        async def _process_presence():
            assert not self._event_processing

            self._event_processing = True
            try:
                await self._unsafe_process()
            finally:
                self._event_processing = False

        run_as_background_process("presence.notify_new_event", _process_presence)

    async def _unsafe_process(self):
        # Loop round handling deltas until we're up to date
        while True:
            with Measure(self.clock, "presence_delta"):
                room_max_stream_ordering = self.store.get_room_max_stream_ordering()
                if self._event_pos == room_max_stream_ordering:
                    return

                logger.debug(
                    "Processing presence stats %s->%s",
                    self._event_pos,
                    room_max_stream_ordering,
                )
                max_pos, deltas = await self.store.get_current_state_deltas(
                    self._event_pos, room_max_stream_ordering
                )
                await self._handle_state_delta(deltas)

                self._event_pos = max_pos

                # Expose current event processing position to prometheus
                synapse.metrics.event_processing_positions.labels("presence").set(
                    max_pos
                )

    async def _handle_state_delta(self, deltas):
        """Process current state deltas to find new joins that need to be
        handled.
        """
        # A map of destination to a set of user state that they should receive
        presence_destinations = {}  # type: Dict[str, Set[UserPresenceState]]
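        # Illustrative shape once populated (example server names only):
        #
        #     {
        #         "server1.example": {alice_presence, bob_presence},
        #         "server2.example": {alice_presence},
        #     }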

        for delta in deltas:
            typ = delta["type"]
            state_key = delta["state_key"]
            room_id = delta["room_id"]
            event_id = delta["event_id"]
            prev_event_id = delta["prev_event_id"]

            logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
# Drop any event that isn't a membership join
|
2019-03-26 08:45:22 -04:00
|
|
|
if typ != EventTypes.Member:
|
|
|
|
continue
|
|
|
|
|
2019-04-26 06:13:16 -04:00
|
|
|
if event_id is None:
|
|
|
|
# state has been deleted, so this is not a join. We only care about
|
|
|
|
# joins.
|
|
|
|
continue
|
|
|
|
|
2020-02-26 10:33:26 -05:00
|
|
|
event = await self.store.get_event(event_id, allow_none=True)
|
2019-06-05 10:45:46 -04:00
|
|
|
if not event or event.content.get("membership") != Membership.JOIN:
|
2019-03-26 08:45:22 -04:00
|
|
|
# We only care about joins
|
|
|
|
continue

            if prev_event_id:
                prev_event = await self.store.get_event(prev_event_id, allow_none=True)
                if (
                    prev_event
                    and prev_event.content.get("membership") == Membership.JOIN
                ):
                    # Ignore changes to join events.
                    continue

            # Retrieve any user presence state updates that need to be sent as a
            # result, and the destinations that need to receive them
            destinations, user_presence_states = await self._on_user_joined_room(
                room_id, state_key
            )

            # Insert the destinations and respective updates into our destinations dict
            for destination in destinations:
                presence_destinations.setdefault(destination, set()).update(
                    user_presence_states
                )

        # Send out user presence updates for each destination
        for destination, user_state_set in presence_destinations.items():
            self.federation.send_presence_to_destinations(
                destinations=[destination], states=user_state_set
            )

    async def _on_user_joined_room(
        self, room_id: str, user_id: str
    ) -> Tuple[List[str], List[UserPresenceState]]:
        """Called when we detect a user joining the room via the current state
        delta stream. Returns the destinations that need to be updated and the
        presence updates to send to them.

        Args:
            room_id: The ID of the room that the user has joined.
            user_id: The ID of the user that has joined the room.

        Returns:
            A tuple of destinations and presence updates to send to them.
        """
        if self.is_mine_id(user_id):
            # If this is a local user then we need to send their presence
            # out to hosts in the room (who don't already have it)

            # TODO: We should be able to filter the hosts down to those that
            # haven't previously seen the user

            remote_hosts = await self.state.get_current_hosts_in_room(room_id)

            # Filter out ourselves.
            filtered_remote_hosts = [
                host for host in remote_hosts if host != self.server_name
            ]

            state = await self.current_state_for_user(user_id)
            return filtered_remote_hosts, [state]
        else:
            # A remote user has joined the room, so we need to:
            #   1. Check if this is a new server in the room.
            #   2. If so, send any presence they don't already have for
            #      local users in the room.

            # TODO: We should be able to filter the users down to those that
            # the server hasn't previously seen

            # TODO: Check that this is actually a new server joining the
            # room.

            remote_host = get_domain_from_id(user_id)

            users = await self.state.get_current_users_in_room(room_id)
            user_ids = list(filter(self.is_mine_id, users))

            states_d = await self.current_state_for_users(user_ids)

            # Filter out old presence, i.e. offline presence states where
            # the user hasn't been active for a week. We can change this
            # depending on what we want the UX to be, but at the least we
            # should filter out offline presence where the state is just the
            # default state.
            now = self.clock.time_msec()
            states = [
                state
                for state in states_d.values()
                if state.state != PresenceState.OFFLINE
                or now - state.last_active_ts < 7 * 24 * 60 * 60 * 1000
                or state.status_msg is not None
            ]

            return [remote_host], states


def should_notify(old_state, new_state):
    """Decides if a presence state change should be sent to interested parties."""
    if old_state == new_state:
        return False

    if old_state.status_msg != new_state.status_msg:
        notify_reason_counter.labels("status_msg_change").inc()
        return True

    if old_state.state != new_state.state:
        notify_reason_counter.labels("state_change").inc()
        state_transition_counter.labels(old_state.state, new_state.state).inc()
        return True

    if old_state.state == PresenceState.ONLINE:
        if new_state.currently_active != old_state.currently_active:
            notify_reason_counter.labels("current_active_change").inc()
            return True

        if (
            new_state.last_active_ts - old_state.last_active_ts
            > LAST_ACTIVE_GRANULARITY
        ):
            # Only notify about last active bumps if we're not currently active
            if not new_state.currently_active:
                notify_reason_counter.labels("last_active_change_online").inc()
                return True

    elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
        # Always notify for a transition where last active gets bumped.
        notify_reason_counter.labels("last_active_change_not_online").inc()
        return True

    return False


def format_user_presence_state(state, now, include_user_id=True):
    """Convert UserPresenceState to a format that can be sent down to clients
    and to other servers.

    The "user_id" is optional so that this function can be used to format presence
    updates for client /sync responses and for federation /send requests.
    """
    content = {"presence": state.state}
    if include_user_id:
        content["user_id"] = state.user_id
    if state.last_active_ts:
        content["last_active_ago"] = now - state.last_active_ts
    if state.status_msg and state.state != PresenceState.OFFLINE:
        content["status_msg"] = state.status_msg
    if state.state == PresenceState.ONLINE:
        content["currently_active"] = state.currently_active

    return content


class PresenceEventSource:
    def __init__(self, hs: "HomeServer"):
        # We can't call get_presence_handler here because there's a cycle:
        #
        # Presence -> Notifier -> PresenceEventSource -> Presence
        #
        # Same with get_module_api, get_presence_router
        #
        # AuthHandler -> Notifier -> PresenceEventSource -> ModuleApi -> AuthHandler
        self.get_presence_handler = hs.get_presence_handler
        self.get_module_api = hs.get_module_api
        self.get_presence_router = hs.get_presence_router
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()

    @log_function
    async def get_new_events(
        self,
        user,
        from_key,
        room_ids=None,
        include_offline=True,
        explicit_room_id=None,
        **kwargs,
    ) -> Tuple[List[UserPresenceState], int]:
        # The process for getting presence events is:
        #  1. Get the rooms the user is in.
        #  2. Get the list of users in the rooms.
        #  3. Get the list of users that are in the user's presence list.
        #  4. If there is a from_key set, cross-reference the list of users
        #     with the `presence_stream_cache` to see which ones we actually
        #     need to check.
        #  5. Load current state for the users.
        #
        # We don't try and limit the presence updates by the current token, as
        # sending down the rare duplicate is not a concern.

        user_id = user.to_string()
        stream_change_cache = self.store.presence_stream_cache

        with Measure(self.clock, "presence.get_new_events"):
            if user_id in self.get_module_api()._send_full_presence_to_local_users:
                # This user has been specified by a module to receive all current,
                # online user presence. Removing from_key and setting
                # include_offline to false will effectively do this.
                from_key = None
                include_offline = False

            if from_key is not None:
                from_key = int(from_key)

            max_token = self.store.get_current_presence_token()
            if from_key == max_token:
                # This is necessary as due to the way stream ID generators work
                # we may get updates that have a stream ID greater than the max
                # token (e.g. max_token is N but the stream generator may return
                # results for N+2, due to N+1 not having finished being
                # persisted yet).
                #
                # This is usually fine, as it just means that we may send down
                # some presence updates multiple times. However, we need to be
                # careful that the sync stream either actually does make some
                # progress or doesn't return, otherwise clients will end up
                # tight looping calling /sync due to it immediately returning
                # the same token repeatedly.
                #
                # Hence this guard where we just return nothing so that the sync
                # doesn't return. C.f. #5503.
                return [], max_token

            # Figure out which other users this user should receive updates for
            users_interested_in = await self._get_interested_in(user, explicit_room_id)

            # We have a set of users that we're interested in the presence of. We
            # want to cross-reference that with the users that have actually
            # changed their presence.

            # Check whether this user should see all user updates
            if users_interested_in == PresenceRouter.ALL_USERS:
                # Provide presence state for all users
                presence_updates = await self._filter_all_presence_updates_for_user(
                    user_id, include_offline, from_key
                )

                # Remove the user from the list of users to receive all presence
                if user_id in self.get_module_api()._send_full_presence_to_local_users:
                    self.get_module_api()._send_full_presence_to_local_users.remove(
                        user_id
                    )

                return presence_updates, max_token

            # Make mypy happy. users_interested_in should now be a set
            assert not isinstance(users_interested_in, str)

            # The set of users that we're interested in and that have had a
            # presence update. We'll actually pull the presence updates for these
            # users at the end.
            interested_and_updated_users = (
                set()
            )  # type: Union[Set[str], FrozenSet[str]]

            if from_key:
                # First get all users that have had a presence update
                updated_users = stream_change_cache.get_all_entities_changed(from_key)

                # Cross-reference users we're interested in with those that have
                # had updates. Use a slightly-optimised method for processing
                # smaller sets of updates.
                if updated_users is not None and len(updated_users) < 500:
                    # For small deltas, it's quicker to get all changes and then
                    # cross-reference with the users we're interested in
                    get_updates_counter.labels("stream").inc()
                    for other_user_id in updated_users:
                        if other_user_id in users_interested_in:
                            # mypy thinks this variable could be a FrozenSet as
                            # it's possibly set to one in the
                            # `get_entities_changed` call below, and `add()` is
                            # not a method on a FrozenSet. That doesn't affect us
                            # here though, as `interested_and_updated_users` is
                            # clearly a set() above.
                            interested_and_updated_users.add(other_user_id)  # type: ignore
                else:
                    # Too many possible updates. Find all users we can see and
                    # check if any of them have changed.
                    get_updates_counter.labels("full").inc()

                    interested_and_updated_users = (
                        stream_change_cache.get_entities_changed(
                            users_interested_in, from_key
                        )
                    )
            else:
                # No from_key has been specified. Return the presence for all users
                # this user is interested in
                interested_and_updated_users = users_interested_in

            # Retrieve the current presence state for each user
            users_to_state = await self.get_presence_handler().current_state_for_users(
                interested_and_updated_users
            )
            presence_updates = list(users_to_state.values())

        # Remove the user from the list of users to receive all presence
        if user_id in self.get_module_api()._send_full_presence_to_local_users:
            self.get_module_api()._send_full_presence_to_local_users.remove(user_id)

        if not include_offline:
            # Filter out offline presence states
            presence_updates = self._filter_offline_presence_state(presence_updates)

        return presence_updates, max_token

    async def _filter_all_presence_updates_for_user(
        self,
        user_id: str,
        include_offline: bool,
        from_key: Optional[int] = None,
    ) -> List[UserPresenceState]:
        """
        Computes the presence updates a user should receive.

        First pulls presence updates from the database. Then consults PresenceRouter
        for whether any updates should be excluded by user ID.

        Args:
            user_id: The User ID of the user to compute presence updates for.
            include_offline: Whether to include offline presence states in the
                results.
            from_key: The minimum stream ID of updates to pull from the database
                before filtering.

        Returns:
            A list of presence states for the given user to receive.
        """
        if from_key:
            # Only return updates since the last sync
            updated_users = self.store.presence_stream_cache.get_all_entities_changed(
                from_key
            )
            if not updated_users:
                updated_users = []

            # Get the actual presence update for each change
            users_to_state = await self.get_presence_handler().current_state_for_users(
                updated_users
            )
            presence_updates = list(users_to_state.values())

            if not include_offline:
                # Filter out offline states
                presence_updates = self._filter_offline_presence_state(presence_updates)
        else:
            users_to_state = await self.store.get_presence_for_all_users(
                include_offline=include_offline
            )

            presence_updates = list(users_to_state.values())

        # TODO: This feels wildly inefficient, and it's unfortunate we need to ask the
        # module for information on a number of users when we then only take the info
        # for a single user

        # Filter through the presence router
        users_to_state_set = await self.get_presence_router().get_users_for_states(
            presence_updates
        )

        # We only want the mapping for the syncing user
        presence_updates = list(users_to_state_set[user_id])

        # Return presence information for all users
        return presence_updates

    def _filter_offline_presence_state(
        self, presence_updates: Iterable[UserPresenceState]
    ) -> List[UserPresenceState]:
        """Given an iterable containing user presence updates, return a list with
        any offline presence states removed.

        Args:
            presence_updates: Presence states to filter

        Returns:
            A new list with any offline presence states removed.
        """
        return [
            update
            for update in presence_updates
            if update.state != PresenceState.OFFLINE
        ]

    def get_current_key(self):
        return self.store.get_current_presence_token()

    @cached(num_args=2, cache_context=True)
    async def _get_interested_in(
        self,
        user: UserID,
        explicit_room_id: Optional[str] = None,
        cache_context: Optional[_CacheContext] = None,
    ) -> Union[Set[str], str]:
        """Returns the set of users that the given user should see presence
        updates for.

        Args:
            user: The user to retrieve presence updates for.
            explicit_room_id: If provided, the users in this room are also
                included in the returned set.

        Returns:
            A set of user IDs to return presence updates for, or "ALL" to return
            all known updates.
        """
        user_id = user.to_string()
        users_interested_in = set()
        users_interested_in.add(user_id)  # So that we receive our own presence

        # cache_context isn't likely to ever be None due to the @cached decorator,
        # but we can't have a non-optional argument after the optional argument
        # explicit_room_id either. Assert cache_context is not None so we can use it
        # without mypy complaining.
        assert cache_context

        # Check with the presence router whether we should poll additional users for
        # their presence information
        additional_users = await self.get_presence_router().get_interested_users(
            user.to_string()
        )
        if additional_users == PresenceRouter.ALL_USERS:
            # If the module requested that this user see the presence updates of
            # *all* users, then simply return that instead of calculating what
            # rooms this user shares
            return PresenceRouter.ALL_USERS

        # Add the additional users from the router
        users_interested_in.update(additional_users)

        # Find the users who share a room with this user
        users_who_share_room = await self.store.get_users_who_share_room_with_user(
            user_id, on_invalidate=cache_context.invalidate
        )
        users_interested_in.update(users_who_share_room)

        if explicit_room_id:
            user_ids = await self.store.get_users_in_room(
                explicit_room_id, on_invalidate=cache_context.invalidate
            )
            users_interested_in.update(user_ids)

        return users_interested_in


def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now):
    """Checks the presence of users that have timed out and updates as
    appropriate.

    Args:
        user_states (list): List of UserPresenceState objects to check.
        is_mine_fn (fn): Function that returns whether a user_id is ours.
        syncing_user_ids (set): Set of user_ids with active syncs.
        now (int): Current time in ms.

    Returns:
        List of UserPresenceState updates
    """
    changes = {}  # Actual changes we need to notify people about

    for state in user_states:
        is_mine = is_mine_fn(state.user_id)

        new_state = handle_timeout(state, is_mine, syncing_user_ids, now)
        if new_state:
            changes[state.user_id] = new_state

    return list(changes.values())


def handle_timeout(state, is_mine, syncing_user_ids, now):
    """Checks the presence of the user to see if any of the timers have elapsed

    Args:
        state (UserPresenceState)
        is_mine (bool): Whether the user is ours
        syncing_user_ids (set): Set of user_ids with active syncs.
        now (int): Current time in ms.

    Returns:
        A UserPresenceState update or None if no update.
    """
    if state.state == PresenceState.OFFLINE:
        # No timeouts are associated with offline states.
        return None

    changed = False
    user_id = state.user_id

    if is_mine:
        if state.state == PresenceState.ONLINE:
            if now - state.last_active_ts > IDLE_TIMER:
                # Currently online, but the last activity was ages ago, so
                # automatically mark them as idle.
                state = state.copy_and_replace(state=PresenceState.UNAVAILABLE)
                changed = True
            elif now - state.last_active_ts > LAST_ACTIVE_GRANULARITY:
                # So that we send down a notification that we've
                # stopped updating.
                changed = True

        if now - state.last_federation_update_ts > FEDERATION_PING_INTERVAL:
            # Need to send a ping to other servers to ensure they don't
            # time us out and set us to offline.
            changed = True

        # If there have been no syncs for a while (and none ongoing),
        # set presence to offline.
        if user_id not in syncing_user_ids:
            # If the user has done something recently but hasn't synced,
            # don't set them as offline.
            sync_or_active = max(state.last_user_sync_ts, state.last_active_ts)
            if now - sync_or_active > SYNC_ONLINE_TIMEOUT:
                state = state.copy_and_replace(
                    state=PresenceState.OFFLINE, status_msg=None
                )
                changed = True
    else:
        # We expect to be poked occasionally by the other side.
        # This is to protect against forgetful/buggy servers, so that
        # no one gets stuck online forever.
        if now - state.last_federation_update_ts > FEDERATION_TIMEOUT:
            # The other side seems to have disappeared.
            state = state.copy_and_replace(state=PresenceState.OFFLINE, status_msg=None)
            changed = True

    return state if changed else None


def handle_update(prev_state, new_state, is_mine, wheel_timer, now):
    """Given a presence update:
    1. Add any appropriate timers.
    2. Check if we should notify anyone.

    Args:
        prev_state (UserPresenceState)
        new_state (UserPresenceState)
        is_mine (bool): Whether the user is ours
        wheel_timer (WheelTimer)
        now (int): Time now in ms

    Returns:
        3-tuple: `(new_state, persist_and_notify, federation_ping)` where:
            - new_state: the state to actually persist
            - persist_and_notify (bool): whether to persist and notify people
            - federation_ping (bool): whether we should send a ping over federation
    """
    user_id = new_state.user_id

    persist_and_notify = False
    federation_ping = False

    # If the user is ours then we want to set up a bunch of timers
    # to time things out.
    if is_mine:
        if new_state.state == PresenceState.ONLINE:
            # Idle timer
            wheel_timer.insert(
                now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER
            )

            active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY
            new_state = new_state.copy_and_replace(currently_active=active)

            if active:
                wheel_timer.insert(
                    now=now,
                    obj=user_id,
                    then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY,
                )

        if new_state.state != PresenceState.OFFLINE:
            # User has stopped syncing
            wheel_timer.insert(
                now=now,
                obj=user_id,
                then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
            )

            last_federate = new_state.last_federation_update_ts
            if now - last_federate > FEDERATION_PING_INTERVAL:
                # Been a while since we've poked remote servers
                new_state = new_state.copy_and_replace(last_federation_update_ts=now)
                federation_ping = True

    else:
        wheel_timer.insert(
            now=now,
            obj=user_id,
            then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT,
        )

    # Check whether the change was something worth notifying about
    if should_notify(prev_state, new_state):
        new_state = new_state.copy_and_replace(last_federation_update_ts=now)
        persist_and_notify = True

    return new_state, persist_and_notify, federation_ping


async def get_interested_parties(
    store: DataStore, presence_router: PresenceRouter, states: List[UserPresenceState]
) -> Tuple[Dict[str, List[UserPresenceState]], Dict[str, List[UserPresenceState]]]:
    """Given a list of states return which entities (rooms, users)
    are interested in the given states.

    Args:
        store: The homeserver's data store.
        presence_router: A module for augmenting the destinations for presence updates.
        states: A list of incoming user presence updates.

    Returns:
        A 2-tuple of `(room_ids_to_states, users_to_states)`,
        with each item being a dict of `entity_name` -> `[UserPresenceState]`
    """
    room_ids_to_states = {}  # type: Dict[str, List[UserPresenceState]]
    users_to_states = {}  # type: Dict[str, List[UserPresenceState]]
    for state in states:
        room_ids = await store.get_rooms_for_user(state.user_id)
        for room_id in room_ids:
            room_ids_to_states.setdefault(room_id, []).append(state)

        # Always notify self
        users_to_states.setdefault(state.user_id, []).append(state)

    # Ask a presence routing module for any additional parties if one
    # is loaded.
    router_users_to_states = await presence_router.get_users_for_states(states)

    # Update the dictionaries with additional destinations and state to send
    for user_id, user_states in router_users_to_states.items():
        users_to_states.setdefault(user_id, []).extend(user_states)

    return room_ids_to_states, users_to_states


async def get_interested_remotes(
    store: DataStore,
    presence_router: PresenceRouter,
    states: List[UserPresenceState],
    state_handler: StateHandler,
) -> List[Tuple[Collection[str], List[UserPresenceState]]]:
    """Given a list of presence states figure out which remote servers
    should be sent which.

    All the presence states should be for local users only.

    Args:
        store: The homeserver's data store.
        presence_router: A module for augmenting the destinations for presence updates.
        states: A list of incoming user presence updates.
        state_handler: Used to look up the current hosts in each room.

    Returns:
        A list of 2-tuples of destinations and states, where for
        each tuple the list of UserPresenceState should be sent to each
        destination
    """
    hosts_and_states = []  # type: List[Tuple[Collection[str], List[UserPresenceState]]]

    # First we look up the rooms each user is in (as well as any explicit
    # subscriptions), then for each distinct room we look up the remote
    # hosts in those rooms.
    room_ids_to_states, users_to_states = await get_interested_parties(
        store, presence_router, states
    )

    for room_id, states in room_ids_to_states.items():
        hosts = await state_handler.get_current_hosts_in_room(room_id)
        hosts_and_states.append((hosts, states))

    for user_id, states in users_to_states.items():
        host = get_domain_from_id(user_id)
        hosts_and_states.append(([host], states))

    return hosts_and_states
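

# Illustrative return value (invented hosts): the result pairs destinations with
# the updates they should receive, e.g.
#
#     [
#         (["example.com", "matrix.org"], [<state @alice:ourserver>]),
#         (["example.com"], [<state @alice:ourserver>]),
#     ]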