2020-04-06 04:58:42 -04:00
|
|
|
# Copyright 2017 Vector Creations Ltd
|
2022-05-19 11:29:08 -04:00
|
|
|
# Copyright 2020, 2022 The Matrix.org Foundation C.I.C.
|
2020-04-06 04:58:42 -04:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
import logging
|
2020-07-16 10:49:37 -04:00
|
|
|
from typing import (
|
2021-01-26 05:54:54 -05:00
|
|
|
TYPE_CHECKING,
|
2020-07-16 10:49:37 -04:00
|
|
|
Any,
|
2020-07-27 13:54:43 -04:00
|
|
|
Awaitable,
|
2020-07-16 10:49:37 -04:00
|
|
|
Dict,
|
|
|
|
Iterable,
|
|
|
|
Iterator,
|
|
|
|
List,
|
|
|
|
Optional,
|
|
|
|
Set,
|
|
|
|
Tuple,
|
|
|
|
TypeVar,
|
|
|
|
Union,
|
|
|
|
)
|
2020-04-06 04:58:42 -04:00
|
|
|
|
|
|
|
from prometheus_client import Counter
|
2020-07-16 10:49:37 -04:00
|
|
|
from typing_extensions import Deque
|
2020-04-06 04:58:42 -04:00
|
|
|
|
2020-04-22 08:07:41 -04:00
|
|
|
from twisted.internet.protocol import ReconnectingClientFactory
|
|
|
|
|
2020-04-07 05:51:07 -04:00
|
|
|
from synapse.metrics import LaterGauge
|
2020-07-27 13:54:43 -04:00
|
|
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
2020-04-06 04:58:42 -04:00
|
|
|
from synapse.replication.tcp.commands import (
|
2020-04-07 05:51:07 -04:00
|
|
|
ClearUserSyncsCommand,
|
2020-04-06 04:58:42 -04:00
|
|
|
Command,
|
|
|
|
FederationAckCommand,
|
|
|
|
PositionCommand,
|
|
|
|
RdataCommand,
|
|
|
|
RemoteServerUpCommand,
|
2020-04-07 05:51:07 -04:00
|
|
|
ReplicateCommand,
|
2020-04-06 04:58:42 -04:00
|
|
|
UserIpCommand,
|
|
|
|
UserSyncCommand,
|
|
|
|
)
|
2021-03-12 11:37:57 -05:00
|
|
|
from synapse.replication.tcp.protocol import IReplicationConnection
|
2020-05-13 05:27:02 -04:00
|
|
|
from synapse.replication.tcp.streams import (
|
|
|
|
STREAMS_MAP,
|
2021-01-18 10:47:59 -05:00
|
|
|
AccountDataStream,
|
2020-05-22 11:11:35 -04:00
|
|
|
BackfillStream,
|
2020-05-13 05:27:02 -04:00
|
|
|
CachesStream,
|
2020-05-22 11:11:35 -04:00
|
|
|
EventsStream,
|
2020-05-13 05:27:02 -04:00
|
|
|
FederationStream,
|
2021-04-23 07:21:55 -04:00
|
|
|
PresenceFederationStream,
|
|
|
|
PresenceStream,
|
2021-01-18 10:47:59 -05:00
|
|
|
ReceiptsStream,
|
2020-05-13 05:27:02 -04:00
|
|
|
Stream,
|
2021-01-07 15:19:26 -05:00
|
|
|
ToDeviceStream,
|
2020-07-16 10:12:54 -04:00
|
|
|
TypingStream,
|
2020-05-13 05:27:02 -04:00
|
|
|
)
|
2020-04-06 04:58:42 -04:00
|
|
|
|
2021-01-26 05:54:54 -05:00
|
|
|
if TYPE_CHECKING:
|
|
|
|
from synapse.server import HomeServer
|
|
|
|
|
2020-04-06 04:58:42 -04:00
|
|
|
logger = logging.getLogger(__name__)


# number of updates received for each RDATA stream
inbound_rdata_count = Counter(
    "synapse_replication_tcp_protocol_inbound_rdata_count", "", ["stream_name"]
)
# Prometheus counters incremented by the corresponding on_* command handlers
# below.
user_sync_counter = Counter("synapse_replication_tcp_resource_user_sync", "")
federation_ack_counter = Counter("synapse_replication_tcp_resource_federation_ack", "")
remove_pusher_counter = Counter("synapse_replication_tcp_resource_remove_pusher", "")

user_ip_cache_counter = Counter("synapse_replication_tcp_resource_user_ip_cache", "")


# the type of the entries in _command_queues_by_stream: each queued inbound
# RDATA/POSITION command is stored alongside the connection it arrived on.
_StreamCommandQueue = Deque[
    Tuple[Union[RdataCommand, PositionCommand], IReplicationConnection]
]
|
|
|
|
|
|
|
|
|
2020-04-06 04:58:42 -04:00
|
|
|
class ReplicationCommandHandler:
|
|
|
|
"""Handles incoming commands from replication as well as sending commands
|
|
|
|
back out to connections.
|
|
|
|
"""
|
|
|
|
|
2021-01-26 05:54:54 -05:00
|
|
|
    def __init__(self, hs: "HomeServer"):
        """Set up the command handler for this process.

        Decides, based on the worker configuration, which streams this
        instance is a *source* of (i.e. which positions it advertises to
        peers), and initialises the per-stream inbound command queues.

        Args:
            hs: the HomeServer providing config and dependencies.
        """
        self._replication_data_handler = hs.get_replication_data_handler()
        self._presence_handler = hs.get_presence_handler()
        self._store = hs.get_datastores().main
        self._notifier = hs.get_notifier()
        self._clock = hs.get_clock()
        self._instance_id = hs.get_instance_id()
        self._instance_name = hs.get_instance_name()

        # Additional Redis channel suffixes to subscribe to.
        self._channels_to_subscribe_to: List[str] = []

        # Whether this instance is the (single) presence writer.
        self._is_presence_writer = (
            hs.get_instance_name() in hs.config.worker.writers.presence
        )

        # All known streams, instantiated for this process, keyed by name.
        self._streams: Dict[str, Stream] = {
            stream.NAME: stream(hs) for stream in STREAMS_MAP.values()
        }

        # List of streams that this instance is the source of
        self._streams_to_replicate: List[Stream] = []

        for stream in self._streams.values():
            if hs.config.redis.redis_enabled and stream.NAME == CachesStream.NAME:
                # All workers can write to the cache invalidation stream when
                # using redis.
                self._streams_to_replicate.append(stream)
                continue

            if isinstance(stream, (EventsStream, BackfillStream)):
                # Only add EventStream and BackfillStream as a source on the
                # instance in charge of event persistence.
                if hs.get_instance_name() in hs.config.worker.writers.events:
                    self._streams_to_replicate.append(stream)

                continue

            if isinstance(stream, ToDeviceStream):
                # Only add ToDeviceStream as a source on instances in charge of
                # sending to device messages.
                if hs.get_instance_name() in hs.config.worker.writers.to_device:
                    self._streams_to_replicate.append(stream)

                continue

            if isinstance(stream, TypingStream):
                # Only add TypingStream as a source on the instance in charge of
                # typing.
                if hs.get_instance_name() in hs.config.worker.writers.typing:
                    self._streams_to_replicate.append(stream)

                continue

            if isinstance(stream, AccountDataStream):
                # Only add AccountDataStream and TagAccountDataStream as a source on the
                # instance in charge of account_data persistence.
                if hs.get_instance_name() in hs.config.worker.writers.account_data:
                    self._streams_to_replicate.append(stream)

                continue

            if isinstance(stream, ReceiptsStream):
                # Only add ReceiptsStream as a source on the instance in charge of
                # receipts.
                if hs.get_instance_name() in hs.config.worker.writers.receipts:
                    self._streams_to_replicate.append(stream)

                continue

            if isinstance(stream, (PresenceStream, PresenceFederationStream)):
                # Only add PresenceStream as a source on the instance in charge
                # of presence.
                if self._is_presence_writer:
                    self._streams_to_replicate.append(stream)

                continue

            # Only add any other streams if we're on master.
            if hs.config.worker.worker_app is not None:
                continue

            if (
                stream.NAME == FederationStream.NAME
                and hs.config.worker.send_federation
            ):
                # We only support federation stream if federation sending
                # has been disabled on the master.
                continue

            self._streams_to_replicate.append(stream)

        # Map of stream name to batched updates. See RdataCommand for info on
        # how batching works.
        self._pending_batches: Dict[str, List[Any]] = {}

        # The factory used to create connections.
        self._factory: Optional[ReconnectingClientFactory] = None

        # The currently connected connections. (The list of places we need to send
        # outgoing replication commands to.)
        self._connections: List[IReplicationConnection] = []

        LaterGauge(
            "synapse_replication_tcp_resource_total_connections",
            "",
            [],
            lambda: len(self._connections),
        )

        # When POSITION or RDATA commands arrive, we stick them in a queue and process
        # them in order in a separate background process.

        # the streams which are currently being processed by _unsafe_process_queue
        self._processing_streams: Set[str] = set()

        # for each stream, a queue of commands that are awaiting processing, and the
        # connection that they arrived on.
        self._command_queues_by_stream = {
            stream_name: _StreamCommandQueue() for stream_name in self._streams
        }

        # For each connection, the incoming stream names that have received a POSITION
        # from that connection.
        self._streams_by_connection: Dict[IReplicationConnection, Set[str]] = {}

        LaterGauge(
            "synapse_replication_tcp_command_queue",
            "Number of inbound RDATA/POSITION commands queued for processing",
            ["stream_name"],
            lambda: {
                (stream_name,): len(queue)
                for stream_name, queue in self._command_queues_by_stream.items()
            },
        )

        self._is_master = hs.config.worker.worker_app is None

        self._federation_sender = None
        if self._is_master and not hs.config.worker.send_federation:
            self._federation_sender = hs.get_federation_sender()

        self._server_notices_sender = None
        if self._is_master:
            self._server_notices_sender = hs.get_server_notices_sender()

        if hs.config.redis.redis_enabled:
            # If we're using Redis, it's the background worker that should
            # receive USER_IP commands and store the relevant client IPs.
            self._should_insert_client_ips = hs.config.worker.run_background_tasks
        else:
            # If we're NOT using Redis, this must be handled by the master
            self._should_insert_client_ips = hs.get_instance_name() == "master"

        if self._is_master or self._should_insert_client_ips:
            self.subscribe_to_channel("USER_IP")
|
|
|
|
|
|
|
|
def subscribe_to_channel(self, channel_name: str) -> None:
|
|
|
|
"""
|
|
|
|
Indicates that we wish to subscribe to a Redis channel by name.
|
|
|
|
|
|
|
|
(The name will later be prefixed with the server name; i.e. subscribing
|
|
|
|
to the 'ABC' channel actually subscribes to 'example.com/ABC' Redis-side.)
|
|
|
|
|
|
|
|
Raises:
|
|
|
|
- If replication has already started, then it's too late to subscribe
|
|
|
|
to new channels.
|
|
|
|
"""
|
|
|
|
|
|
|
|
if self._factory is not None:
|
|
|
|
# We don't allow subscribing after the fact to avoid the chance
|
|
|
|
# of missing an important message because we didn't subscribe in time.
|
|
|
|
raise RuntimeError(
|
|
|
|
"Cannot subscribe to more channels after replication started."
|
|
|
|
)
|
|
|
|
|
|
|
|
if channel_name not in self._channels_to_subscribe_to:
|
|
|
|
self._channels_to_subscribe_to.append(channel_name)
|
|
|
|
|
2020-07-27 13:54:43 -04:00
|
|
|
def _add_command_to_stream_queue(
|
2021-03-12 11:37:57 -05:00
|
|
|
self, conn: IReplicationConnection, cmd: Union[RdataCommand, PositionCommand]
|
2020-07-16 10:49:37 -04:00
|
|
|
) -> None:
|
|
|
|
"""Queue the given received command for processing
|
|
|
|
|
|
|
|
Adds the given command to the per-stream queue, and processes the queue if
|
|
|
|
necessary
|
|
|
|
"""
|
|
|
|
stream_name = cmd.stream_name
|
|
|
|
queue = self._command_queues_by_stream.get(stream_name)
|
|
|
|
if queue is None:
|
|
|
|
logger.error("Got %s for unknown stream: %s", cmd.NAME, stream_name)
|
|
|
|
return
|
|
|
|
|
2020-07-27 13:54:43 -04:00
|
|
|
queue.append((cmd, conn))
|
|
|
|
|
|
|
|
# if we're already processing this stream, there's nothing more to do:
|
|
|
|
# the new entry on the queue will get picked up in due course
|
2020-07-16 10:49:37 -04:00
|
|
|
if stream_name in self._processing_streams:
|
|
|
|
return
|
|
|
|
|
2020-07-27 13:54:43 -04:00
|
|
|
# fire off a background process to start processing the queue.
|
|
|
|
run_as_background_process(
|
|
|
|
"process-replication-data", self._unsafe_process_queue, stream_name
|
|
|
|
)
|
2020-07-16 10:49:37 -04:00
|
|
|
|
2022-02-08 11:03:08 -05:00
|
|
|
async def _unsafe_process_queue(self, stream_name: str) -> None:
|
2020-07-27 13:54:43 -04:00
|
|
|
"""Processes the command queue for the given stream, until it is empty
|
|
|
|
|
|
|
|
Does not check if there is already a thread processing the queue, hence "unsafe"
|
|
|
|
"""
|
|
|
|
assert stream_name not in self._processing_streams
|
2020-07-16 10:49:37 -04:00
|
|
|
|
|
|
|
self._processing_streams.add(stream_name)
|
|
|
|
try:
|
2020-07-27 13:54:43 -04:00
|
|
|
queue = self._command_queues_by_stream.get(stream_name)
|
2020-07-16 10:49:37 -04:00
|
|
|
while queue:
|
|
|
|
cmd, conn = queue.popleft()
|
|
|
|
try:
|
|
|
|
await self._process_command(cmd, conn, stream_name)
|
|
|
|
except Exception:
|
|
|
|
logger.exception("Failed to handle command %s", cmd)
|
|
|
|
finally:
|
|
|
|
self._processing_streams.discard(stream_name)
|
|
|
|
|
|
|
|
async def _process_command(
|
|
|
|
self,
|
|
|
|
cmd: Union[PositionCommand, RdataCommand],
|
2021-03-12 11:37:57 -05:00
|
|
|
conn: IReplicationConnection,
|
2020-07-16 10:49:37 -04:00
|
|
|
stream_name: str,
|
|
|
|
) -> None:
|
|
|
|
if isinstance(cmd, PositionCommand):
|
|
|
|
await self._process_position(stream_name, conn, cmd)
|
|
|
|
elif isinstance(cmd, RdataCommand):
|
|
|
|
await self._process_rdata(stream_name, conn, cmd)
|
|
|
|
else:
|
|
|
|
# This shouldn't be possible
|
|
|
|
raise Exception("Unrecognised command %s in stream queue", cmd.NAME)
|
|
|
|
|
2022-02-08 11:03:08 -05:00
|
|
|
    def start_replication(self, hs: "HomeServer") -> None:
        """Helper method to start replication."""
        # Imported locally, presumably to avoid requiring the Redis
        # dependencies unless replication is actually started — TODO confirm.
        from synapse.replication.tcp.redis import RedisDirectTcpReplicationClientFactory

        # First let's ensure that we have a ReplicationStreamer started.
        hs.get_replication_streamer()

        # We need two connections to redis, one for the subscription stream and
        # one to send commands to (as you can't send further redis commands to a
        # connection after SUBSCRIBE is called).

        # First create the connection for sending commands.
        outbound_redis_connection = hs.get_outbound_redis_connection()

        # Now create the factory/connection for the subscription stream.
        # Setting self._factory also locks out further subscribe_to_channel()
        # calls.
        self._factory = RedisDirectTcpReplicationClientFactory(
            hs,
            outbound_redis_connection,
            channel_names=self._channels_to_subscribe_to,
        )
        hs.get_reactor().connectTCP(
            hs.config.redis.redis_host,
            hs.config.redis.redis_port,
            self._factory,
            timeout=30,
            bindAddress=None,
        )
|
2020-04-06 04:58:42 -04:00
|
|
|
|
2020-05-13 05:27:02 -04:00
|
|
|
def get_streams(self) -> Dict[str, Stream]:
|
|
|
|
"""Get a map from stream name to all streams."""
|
|
|
|
return self._streams
|
|
|
|
|
|
|
|
def get_streams_to_replicate(self) -> List[Stream]:
|
|
|
|
"""Get a list of streams that this instances replicates."""
|
|
|
|
return self._streams_to_replicate
|
|
|
|
|
2022-02-08 11:03:08 -05:00
|
|
|
    def on_REPLICATE(self, conn: IReplicationConnection, cmd: ReplicateCommand) -> None:
        # A REPLICATE request from the remote: respond with our current
        # position on every stream this instance is a source of.
        self.send_positions_to_connection(conn)
|
|
|
|
|
2022-02-08 11:03:08 -05:00
|
|
|
    def send_positions_to_connection(self, conn: IReplicationConnection) -> None:
        """Send current position of all streams this process is source of to
        the connection.

        Args:
            conn: the connection to advertise our positions to. (Note: the
                commands are broadcast via send_command, which sends to all
                connections.)
        """

        # We respond with current position of all streams this instance
        # replicates.
        for stream in self.get_streams_to_replicate():
            # Note that we use the current token as the prev token here (rather
            # than stream.last_token), as we can't be sure that there have been
            # no rows written between last token and the current token (since we
            # might be racing with the replication sending bg process).
            current_token = stream.current_token(self._instance_name)
            self.send_command(
                PositionCommand(
                    stream.NAME,
                    self._instance_name,
                    current_token,
                    current_token,
                )
            )
|
2020-04-07 05:51:07 -04:00
|
|
|
|
2020-07-27 13:54:43 -04:00
|
|
|
def on_USER_SYNC(
|
2021-03-12 11:37:57 -05:00
|
|
|
self, conn: IReplicationConnection, cmd: UserSyncCommand
|
2020-07-27 13:54:43 -04:00
|
|
|
) -> Optional[Awaitable[None]]:
|
2020-04-07 05:51:07 -04:00
|
|
|
user_sync_counter.inc()
|
|
|
|
|
2021-04-23 07:21:55 -04:00
|
|
|
if self._is_presence_writer:
|
2020-07-27 13:54:43 -04:00
|
|
|
return self._presence_handler.update_external_syncs_row(
|
2020-04-07 05:51:07 -04:00
|
|
|
cmd.instance_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms
|
|
|
|
)
|
2020-07-27 13:54:43 -04:00
|
|
|
else:
|
|
|
|
return None
|
2020-04-07 05:51:07 -04:00
|
|
|
|
2020-07-27 13:54:43 -04:00
|
|
|
def on_CLEAR_USER_SYNC(
|
2021-03-12 11:37:57 -05:00
|
|
|
self, conn: IReplicationConnection, cmd: ClearUserSyncsCommand
|
2020-07-27 13:54:43 -04:00
|
|
|
) -> Optional[Awaitable[None]]:
|
2021-04-23 07:21:55 -04:00
|
|
|
if self._is_presence_writer:
|
2020-07-27 13:54:43 -04:00
|
|
|
return self._presence_handler.update_external_syncs_clear(cmd.instance_id)
|
|
|
|
else:
|
|
|
|
return None
|
2020-04-07 05:51:07 -04:00
|
|
|
|
2021-03-12 11:37:57 -05:00
|
|
|
def on_FEDERATION_ACK(
|
|
|
|
self, conn: IReplicationConnection, cmd: FederationAckCommand
|
2022-02-08 11:03:08 -05:00
|
|
|
) -> None:
|
2020-04-07 05:51:07 -04:00
|
|
|
federation_ack_counter.inc()
|
|
|
|
|
|
|
|
if self._federation_sender:
|
2020-07-10 13:26:36 -04:00
|
|
|
self._federation_sender.federation_ack(cmd.instance_name, cmd.token)
|
2020-04-07 05:51:07 -04:00
|
|
|
|
2020-07-27 13:54:43 -04:00
|
|
|
def on_USER_IP(
|
2021-03-12 11:37:57 -05:00
|
|
|
self, conn: IReplicationConnection, cmd: UserIpCommand
|
2020-07-27 13:54:43 -04:00
|
|
|
) -> Optional[Awaitable[None]]:
|
2020-04-07 05:51:07 -04:00
|
|
|
user_ip_cache_counter.inc()
|
|
|
|
|
2022-04-01 08:08:55 -04:00
|
|
|
if self._is_master or self._should_insert_client_ips:
|
|
|
|
# We make a point of only returning an awaitable if there's actually
|
|
|
|
# something to do; on_USER_IP is not an async function, but
|
|
|
|
# _handle_user_ip is.
|
|
|
|
# If on_USER_IP returns an awaitable, it gets scheduled as a
|
|
|
|
# background process (see `BaseReplicationStreamProtocol.handle_command`).
|
2020-07-27 13:54:43 -04:00
|
|
|
return self._handle_user_ip(cmd)
|
|
|
|
else:
|
2022-04-01 08:08:55 -04:00
|
|
|
# Returning None when this process definitely has nothing to do
|
|
|
|
# reduces the overhead of handling the USER_IP command, which is
|
|
|
|
# currently broadcast to all workers regardless of utility.
|
2020-07-27 13:54:43 -04:00
|
|
|
return None
|
|
|
|
|
2022-02-08 11:03:08 -05:00
|
|
|
async def _handle_user_ip(self, cmd: UserIpCommand) -> None:
|
2022-04-01 08:08:55 -04:00
|
|
|
"""
|
|
|
|
Handles a User IP, branching depending on whether we are the main process
|
|
|
|
and/or the background worker.
|
|
|
|
"""
|
|
|
|
if self._is_master:
|
|
|
|
assert self._server_notices_sender is not None
|
|
|
|
await self._server_notices_sender.on_user_ip(cmd.user_id)
|
|
|
|
|
|
|
|
if self._should_insert_client_ips:
|
|
|
|
await self._store.insert_client_ip(
|
|
|
|
cmd.user_id,
|
|
|
|
cmd.access_token,
|
|
|
|
cmd.ip,
|
|
|
|
cmd.user_agent,
|
|
|
|
cmd.device_id,
|
|
|
|
cmd.last_seen,
|
|
|
|
)
|
2020-04-07 05:51:07 -04:00
|
|
|
|
2022-02-08 11:03:08 -05:00
|
|
|
def on_RDATA(self, conn: IReplicationConnection, cmd: RdataCommand) -> None:
|
2020-04-29 11:23:08 -04:00
|
|
|
if cmd.instance_name == self._instance_name:
|
|
|
|
# Ignore RDATA that are just our own echoes
|
|
|
|
return
|
|
|
|
|
2020-04-06 04:58:42 -04:00
|
|
|
stream_name = cmd.stream_name
|
|
|
|
inbound_rdata_count.labels(stream_name).inc()
|
|
|
|
|
2020-07-16 10:49:37 -04:00
|
|
|
# We put the received command into a queue here for two reasons:
|
2020-04-07 06:01:04 -04:00
|
|
|
# 1. so we don't try and concurrently handle multiple rows for the
|
|
|
|
# same stream, and
|
|
|
|
# 2. so we don't race with getting a POSITION command and fetching
|
|
|
|
# missing RDATA.
|
2020-07-16 10:49:37 -04:00
|
|
|
|
2020-07-27 13:54:43 -04:00
|
|
|
self._add_command_to_stream_queue(conn, cmd)
|
2020-07-16 10:49:37 -04:00
|
|
|
|
|
|
|
    async def _process_rdata(
        self, stream_name: str, conn: IReplicationConnection, cmd: RdataCommand
    ) -> None:
        """Process an RDATA command

        Called after the command has been popped off the queue of inbound commands

        Args:
            stream_name: the stream the row belongs to.
            conn: the connection the command arrived on.
            cmd: the RDATA command carrying the row (cmd.token is None for all
                but the last row of a batch).
        """
        try:
            row = STREAMS_MAP[stream_name].parse_row(cmd.row)
        except Exception as e:
            raise Exception(
                "Failed to parse RDATA: %r %r" % (stream_name, cmd.row)
            ) from e

        # make sure that we've processed a POSITION for this stream *on this
        # connection*. (A POSITION on another connection is no good, as there
        # is no guarantee that we have seen all the intermediate updates.)
        sbc = self._streams_by_connection.get(conn)
        if not sbc or stream_name not in sbc:
            # Let's drop the row for now, on the assumption we'll receive a
            # `POSITION` soon and we'll catch up correctly then.
            logger.debug(
                "Discarding RDATA for unconnected stream %s -> %s",
                stream_name,
                cmd.token,
            )
            return

        if cmd.token is None:
            # I.e. this is part of a batch of updates for this stream (in
            # which case batch until we get an update for the stream with a non
            # None token).
            self._pending_batches.setdefault(stream_name, []).append(row)
            return

        # Check if this is the last of a batch of updates
        rows = self._pending_batches.pop(stream_name, [])
        rows.append(row)

        stream = self._streams[stream_name]

        # Find where we previously streamed up to.
        current_token = stream.current_token(cmd.instance_name)

        # Discard this data if this token is earlier than the current
        # position. Note that streams can be reset (in which case you
        # expect an earlier token), but that must be preceded by a
        # POSITION command.
        if cmd.token <= current_token:
            logger.debug(
                "Discarding RDATA from stream %s at position %s before previous position %s",
                stream_name,
                cmd.token,
                current_token,
            )
        else:
            await self.on_rdata(stream_name, cmd.instance_name, cmd.token, rows)
|
2020-04-06 04:58:42 -04:00
|
|
|
|
2020-05-01 12:19:56 -04:00
|
|
|
    async def on_rdata(
        self, stream_name: str, instance_name: str, token: int, rows: list
    ) -> None:
        """Called to handle a batch of replication data with a given stream token.

        Simply logs and forwards the batch to the replication data handler.

        Args:
            stream_name: name of the replication stream for this batch of rows
            instance_name: the instance that wrote the rows.
            token: stream token for this batch of rows
            rows: a list of Stream.ROW_TYPE objects as returned by
                Stream.parse_row.
        """
        logger.debug("Received rdata %s (%s) -> %s", stream_name, instance_name, token)
        await self._replication_data_handler.on_rdata(
            stream_name, instance_name, token, rows
        )
|
2020-04-06 04:58:42 -04:00
|
|
|
|
2022-02-08 11:03:08 -05:00
|
|
|
    def on_POSITION(self, conn: IReplicationConnection, cmd: PositionCommand) -> None:
        # Queue an inbound POSITION command for ordered processing alongside
        # the RDATA commands for the same stream.
        if cmd.instance_name == self._instance_name:
            # Ignore POSITION that are just our own echoes
            return

        logger.debug("Handling '%s %s'", cmd.NAME, cmd.to_line())

        self._add_command_to_stream_queue(conn, cmd)
|
2020-04-06 04:58:42 -04:00
|
|
|
|
2020-07-16 10:49:37 -04:00
|
|
|
    async def _process_position(
        self, stream_name: str, conn: IReplicationConnection, cmd: PositionCommand
    ) -> None:
        """Process a POSITION command

        Called after the command has been popped off the queue of inbound commands

        Fetches and replays any updates between our current token and the
        advertised position, then marks the stream as connected for `conn`.

        Args:
            stream_name: the stream the position is for.
            conn: the connection the command arrived on.
            cmd: the POSITION command (carries prev_token/new_token).
        """
        stream = self._streams[stream_name]

        # We're about to go and catch up with the stream, so remove from set
        # of connected streams.
        for streams in self._streams_by_connection.values():
            streams.discard(stream_name)

        # We clear the pending batches for the stream as the fetching of the
        # missing updates below will fetch all rows in the batch.
        self._pending_batches.pop(stream_name, [])

        # Find where we previously streamed up to.
        current_token = stream.current_token(cmd.instance_name)

        # If the position token matches our current token then we're up to
        # date and there's nothing to do. Otherwise, fetch all updates
        # between then and now.
        missing_updates = cmd.prev_token != current_token
        while missing_updates:
            # Note: There may very well not be any new updates, but we check to
            # make sure. This can particularly happen for the event stream where
            # event persisters continuously send `POSITION`. See `resource.py`
            # for why this can happen.

            logger.info(
                "Fetching replication rows for '%s' between %i and %i",
                stream_name,
                current_token,
                cmd.new_token,
            )
            (updates, current_token, missing_updates) = await stream.get_updates_since(
                cmd.instance_name, current_token, cmd.new_token
            )

            # TODO: add some tests for this

            # Some streams return multiple rows with the same stream IDs,
            # which need to be processed in batches.

            for token, rows in _batch_updates(updates):
                await self.on_rdata(
                    stream_name,
                    cmd.instance_name,
                    token,
                    [stream.parse_row(row) for row in rows],
                )

        logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token)

        # We've now caught up to position sent to us, notify handler.
        await self._replication_data_handler.on_position(
            cmd.stream_name, cmd.instance_name, cmd.new_token
        )

        # Future RDATA on this connection for this stream may now be processed
        # (see _process_rdata).
        self._streams_by_connection.setdefault(conn, set()).add(stream_name)
|
2020-04-06 04:58:42 -04:00
|
|
|
|
2021-03-12 11:37:57 -05:00
|
|
|
def on_REMOTE_SERVER_UP(
|
|
|
|
self, conn: IReplicationConnection, cmd: RemoteServerUpCommand
|
2022-02-08 11:03:08 -05:00
|
|
|
) -> None:
|
2021-06-17 10:20:06 -04:00
|
|
|
"""Called when get a new REMOTE_SERVER_UP command."""
|
2020-04-06 04:58:42 -04:00
|
|
|
self._replication_data_handler.on_remote_server_up(cmd.data)
|
|
|
|
|
2020-04-29 09:10:59 -04:00
|
|
|
self._notifier.notify_remote_server_up(cmd.data)
|
|
|
|
|
2022-02-08 11:03:08 -05:00
|
|
|
def new_connection(self, connection: IReplicationConnection) -> None:
|
2020-04-07 05:51:07 -04:00
|
|
|
"""Called when we have a new connection."""
|
|
|
|
self._connections.append(connection)
|
|
|
|
|
|
|
|
# If we are connected to replication as a client (rather than a server)
|
|
|
|
# we need to reset the reconnection delay on the client factory (which
|
|
|
|
# is used to do exponential back off when the connection drops).
|
|
|
|
#
|
|
|
|
# Ideally we would reset the delay when we've "fully established" the
|
|
|
|
# connection (for some definition thereof) to stop us from tightlooping
|
|
|
|
# on reconnection if something fails after this point and we drop the
|
|
|
|
# connection. Unfortunately, we don't really have a better definition of
|
|
|
|
# "fully established" than the connection being established.
|
|
|
|
if self._factory:
|
|
|
|
self._factory.resetDelay()
|
|
|
|
|
2020-04-22 17:39:04 -04:00
|
|
|
# Tell the other end if we have any users currently syncing.
|
|
|
|
currently_syncing = (
|
|
|
|
self._presence_handler.get_currently_syncing_users_for_replication()
|
|
|
|
)
|
|
|
|
|
2020-04-07 05:51:07 -04:00
|
|
|
now = self._clock.time_msec()
|
|
|
|
for user_id in currently_syncing:
|
|
|
|
connection.send_command(
|
|
|
|
UserSyncCommand(self._instance_id, user_id, True, now)
|
|
|
|
)
|
2020-04-06 04:58:42 -04:00
|
|
|
|
2022-02-08 11:03:08 -05:00
|
|
|
def lost_connection(self, connection: IReplicationConnection) -> None:
    """Called when a replication connection is closed or lost."""
    # Drop the per-connection stream bookkeeping: it is only meaningful
    # while the connection is alive.
    disconnected_streams = self._streams_by_connection.pop(connection, None)
    if disconnected_streams:
        logger.info(
            "Lost replication connection; streams now disconnected: %s",
            disconnected_streams,
        )

    # The connection may already have been removed (or never added), so
    # tolerate it being absent.
    if connection in self._connections:
        self._connections.remove(connection)
|
2020-04-06 04:58:42 -04:00
|
|
|
|
2020-04-07 05:51:07 -04:00
|
|
|
def connected(self) -> bool:
    """Report whether at least one replication connection is open.

    Used by e.g. `ReplicationStreamer` to no-op when nothing is connected.
    """
    return len(self._connections) > 0
|
2020-04-06 04:58:42 -04:00
|
|
|
|
2023-03-16 11:13:30 -04:00
|
|
|
def send_command(self, cmd: Command) -> None:
    """Broadcast a command to every connected connection.

    Args:
        cmd: the command to send
    """
    if not self._connections:
        logger.warning("Dropping command as not connected: %r", cmd.NAME)
        return

    for conn in self._connections:
        try:
            conn.send_command(cmd)
        except Exception:
            # There are probably specific exception types we should catch
            # here and log as warnings (e.g. the connection having gone
            # away), but it's unclear exactly which ones they would be.
            logger.exception(
                "Failed to write command %s to connection %s",
                cmd.NAME,
                conn,
            )
|
|
|
|
|
2022-02-08 11:03:08 -05:00
|
|
|
def send_federation_ack(self, token: int) -> None:
    """Acknowledge federation stream data up to `token`.

    This lets the master drop data it was holding purely in memory.
    """
    ack = FederationAckCommand(self._instance_name, token)
    self.send_command(ack)
|
2020-04-06 04:58:42 -04:00
|
|
|
|
|
|
|
def send_user_sync(
    self, instance_id: str, user_id: str, is_syncing: bool, last_sync_ms: int
) -> None:
    """Tell the master that a user has started or stopped syncing."""
    cmd = UserSyncCommand(instance_id, user_id, is_syncing, last_sync_ms)
    self.send_command(cmd)
|
|
|
|
|
|
|
|
def send_user_ip(
    self,
    user_id: str,
    access_token: str,
    ip: str,
    user_agent: str,
    device_id: Optional[str],
    last_seen: int,
) -> None:
    """Notify the master that the given user made a request from `ip`."""
    self.send_command(
        UserIpCommand(user_id, access_token, ip, user_agent, device_id, last_seen)
    )
|
|
|
|
|
2022-02-08 11:03:08 -05:00
|
|
|
def send_remote_server_up(self, server: str) -> None:
    """Broadcast that a previously-unreachable remote server is back up."""
    cmd = RemoteServerUpCommand(server)
    self.send_command(cmd)
|
2020-04-07 05:51:07 -04:00
|
|
|
|
2022-02-08 11:03:08 -05:00
|
|
|
def stream_update(self, stream_name: str, token: Optional[int], data: Any) -> None:
    """Called when a new update is available to stream to Redis subscribers.

    We need to check if the client is interested in the stream or not
    """
    cmd = RdataCommand(stream_name, self._instance_name, token, data)
    self.send_command(cmd)
|
2020-04-20 06:43:29 -04:00
|
|
|
|
|
|
|
|
|
|
|
UpdateToken = TypeVar("UpdateToken")
|
|
|
|
UpdateRow = TypeVar("UpdateRow")
|
|
|
|
|
|
|
|
|
|
|
|
def _batch_updates(
|
|
|
|
updates: Iterable[Tuple[UpdateToken, UpdateRow]]
|
|
|
|
) -> Iterator[Tuple[UpdateToken, List[UpdateRow]]]:
|
|
|
|
"""Collect stream updates with the same token together
|
|
|
|
|
|
|
|
Given a series of updates returned by Stream.get_updates_since(), collects
|
|
|
|
the updates which share the same stream_id together.
|
|
|
|
|
|
|
|
For example:
|
|
|
|
|
|
|
|
[(1, a), (1, b), (2, c), (3, d), (3, e)]
|
|
|
|
|
|
|
|
becomes:
|
|
|
|
|
|
|
|
[
|
|
|
|
(1, [a, b]),
|
|
|
|
(2, [c]),
|
|
|
|
(3, [d, e]),
|
|
|
|
]
|
|
|
|
"""
|
|
|
|
|
|
|
|
update_iter = iter(updates)
|
|
|
|
|
|
|
|
first_update = next(update_iter, None)
|
|
|
|
if first_update is None:
|
|
|
|
# empty input
|
|
|
|
return
|
|
|
|
|
|
|
|
current_batch_token = first_update[0]
|
|
|
|
current_batch = [first_update[1]]
|
|
|
|
|
|
|
|
for token, row in update_iter:
|
|
|
|
if token != current_batch_token:
|
|
|
|
# different token to the previous row: flush the previous
|
|
|
|
# batch and start anew
|
|
|
|
yield current_batch_token, current_batch
|
|
|
|
current_batch_token = token
|
|
|
|
current_batch = []
|
|
|
|
|
|
|
|
current_batch.append(row)
|
|
|
|
|
|
|
|
# flush the final batch
|
|
|
|
yield current_batch_token, current_batch
|