2016-06-03 06:57:26 -04:00
|
|
|
#!/usr/bin/env python
|
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
# Copyright 2016 OpenMarket Ltd
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
2017-08-15 10:57:46 -04:00
|
|
|
import contextlib
|
|
|
|
import logging
|
|
|
|
import sys
|
2016-06-03 06:57:26 -04:00
|
|
|
|
2018-07-09 02:09:20 -04:00
|
|
|
from six import iteritems
|
|
|
|
|
|
|
|
from twisted.internet import defer, reactor
|
|
|
|
from twisted.web.resource import NoResource
|
|
|
|
|
2016-06-03 06:57:26 -04:00
|
|
|
import synapse
|
2017-03-27 11:33:44 -04:00
|
|
|
from synapse.api.constants import EventTypes
|
2017-08-15 10:57:46 -04:00
|
|
|
from synapse.app import _base
|
2016-06-03 06:57:26 -04:00
|
|
|
from synapse.config._base import ConfigError
|
2016-06-16 06:06:12 -04:00
|
|
|
from synapse.config.homeserver import HomeServerConfig
|
|
|
|
from synapse.config.logger import setup_logging
|
2017-04-11 10:30:02 -04:00
|
|
|
from synapse.handlers.presence import PresenceHandler, get_interested_parties
|
2016-06-03 06:57:26 -04:00
|
|
|
from synapse.http.server import JsonResource
|
2017-08-15 10:57:46 -04:00
|
|
|
from synapse.http.site import SynapseSite
|
2018-05-31 05:04:50 -04:00
|
|
|
from synapse.metrics import RegistryProxy
|
2017-08-15 10:57:46 -04:00
|
|
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
2018-10-12 09:14:08 -04:00
|
|
|
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
|
2016-06-03 06:57:26 -04:00
|
|
|
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
|
|
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
2017-08-15 10:57:46 -04:00
|
|
|
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
2016-08-30 04:40:32 -04:00
|
|
|
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
|
2017-01-27 08:36:39 -05:00
|
|
|
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
2017-08-15 10:57:46 -04:00
|
|
|
from synapse.replication.slave.storage.events import SlavedEventStore
|
|
|
|
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
|
2018-07-09 02:09:20 -04:00
|
|
|
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
|
2017-08-15 10:57:46 -04:00
|
|
|
from synapse.replication.slave.storage.presence import SlavedPresenceStore
|
|
|
|
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
|
|
|
|
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
|
|
|
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
2016-09-21 06:46:28 -04:00
|
|
|
from synapse.replication.slave.storage.room import RoomStore
|
2017-03-27 11:33:44 -04:00
|
|
|
from synapse.replication.tcp.client import ReplicationClientHandler
|
2019-03-27 11:18:28 -04:00
|
|
|
from synapse.replication.tcp.streams.events import EventsStreamEventRow
|
2017-08-15 10:57:46 -04:00
|
|
|
from synapse.rest.client.v1 import events
|
|
|
|
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
|
|
|
|
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
|
|
|
|
from synapse.rest.client.v2_alpha import sync
|
2016-06-03 06:57:26 -04:00
|
|
|
from synapse.server import HomeServer
|
|
|
|
from synapse.storage.engines import create_engine
|
2017-04-11 11:07:33 -04:00
|
|
|
from synapse.storage.presence import UserPresenceState
|
2016-06-03 06:57:26 -04:00
|
|
|
from synapse.util.httpresourcetree import create_resource_tree
|
2018-04-27 06:29:27 -04:00
|
|
|
from synapse.util.logcontext import LoggingContext, run_in_background
|
2016-06-03 06:57:26 -04:00
|
|
|
from synapse.util.manhole import manhole
|
|
|
|
from synapse.util.stringutils import random_string
|
|
|
|
from synapse.util.versionstring import get_version_string
|
2018-04-15 10:51:07 -04:00
|
|
|
|
2016-06-03 06:57:26 -04:00
|
|
|
# Module-level logger, namespaced so synchrotron log lines are identifiable.
logger = logging.getLogger("synapse.app.synchrotron")
|
|
|
|
|
|
|
|
|
|
|
|
class SynchrotronSlavedStore(
    SlavedReceiptsStore,
    SlavedAccountDataStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedFilteringStore,
    SlavedPresenceStore,
    SlavedGroupServerStore,
    SlavedDeviceInboxStore,
    SlavedDeviceStore,
    SlavedPushRuleStore,
    SlavedEventStore,
    SlavedClientIpStore,
    RoomStore,
    BaseSlavedStore,
):
    """Composite datastore for the synchrotron worker.

    All behaviour comes from the replicated ("slaved") store mixins; this
    class only assembles them.  The MRO order matters — BaseSlavedStore
    must come last.
    """

    pass
|
2017-02-13 06:16:53 -05:00
|
|
|
|
2016-06-03 13:03:40 -04:00
|
|
|
|
2016-06-03 09:24:19 -04:00
|
|
|
# Interval (ms) for reporting which users are syncing.
# NOTE(review): this constant appears unused in this module — the looping
# call and the offline-debounce threshold both hardcode 10 * 1000; consider
# consolidating them onto this constant.
UPDATE_SYNCING_USERS_MS = 10 * 1000
|
|
|
|
|
2016-06-03 06:57:26 -04:00
|
|
|
|
|
|
|
class SynchrotronPresence(object):
    """Worker-side stand-in for the master PresenceHandler.

    Tracks how many /sync requests each user currently has open against
    this worker and reports started/stopped-syncing transitions to the
    master over TCP replication.  "Stopped syncing" reports are debounced
    by ~10s (see send_stop_syncing) so a quick reconnect does not generate
    a stop/start pair.  It also mirrors presence state received from
    replication so local readers see up-to-date state.
    """

    def __init__(self, hs):
        self.hs = hs
        self.is_mine_id = hs.is_mine_id
        self.http_client = hs.get_simple_http_client()
        self.store = hs.get_datastore()
        # user_id -> number of in-flight /sync requests for that user.
        self.user_to_num_current_syncs = {}
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()

        # Presence state loaded from the store at startup — presumably the
        # set of users who were active when the process last stopped.
        # TODO(review): confirm semantics of take_presence_startup_info().
        active_presence = self.store.take_presence_startup_info()
        self.user_to_current_state = {state.user_id: state for state in active_presence}

        # user_id -> last_sync_ms. Lists the users that have stopped syncing
        # but we haven't notified the master of that yet
        self.users_going_offline = {}

        # Periodically flush debounced "stopped syncing" notifications.
        self._send_stop_syncing_loop = self.clock.looping_call(
            self.send_stop_syncing, 10 * 1000
        )

        # Random id identifying this process to the master's presence logic.
        self.process_id = random_string(16)
        logger.info("Presence process_id is %r", self.process_id)

    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
        """Tell the master a user started/stopped syncing (no-op if presence
        is disabled in config)."""
        if self.hs.config.use_presence:
            self.hs.get_tcp_replication().send_user_sync(
                user_id, is_syncing, last_sync_ms
            )

    def mark_as_coming_online(self, user_id):
        """A user has started syncing. Send a UserSync to the master, unless they
        had recently stopped syncing.

        Args:
            user_id (str)
        """
        going_offline = self.users_going_offline.pop(user_id, None)
        if not going_offline:
            # Safe to skip because we haven't yet told the master they were offline
            self.send_user_sync(user_id, True, self.clock.time_msec())

    def mark_as_going_offline(self, user_id):
        """A user has stopped syncing. We wait before notifying the master as
        its likely they'll come back soon. This allows us to avoid sending
        a stopped syncing immediately followed by a started syncing notification
        to the master

        Args:
            user_id (str)
        """
        self.users_going_offline[user_id] = self.clock.time_msec()

    def send_stop_syncing(self):
        """Check if there are any users who have stopped syncing a while ago
        and haven't come back yet. If there are poke the master about them.
        """
        now = self.clock.time_msec()
        # list(...) because we mutate the dict while iterating.
        for user_id, last_sync_ms in list(self.users_going_offline.items()):
            if now - last_sync_ms > 10 * 1000:
                self.users_going_offline.pop(user_id, None)
                self.send_user_sync(user_id, False, last_sync_ms)

    def set_state(self, user, state, ignore_status_msg=False):
        # TODO Hows this supposed to work?
        pass

    # Borrow the read-only getters from PresenceHandler, unbound (via
    # __func__) so they run against this object's state instead.
    get_states = __func__(PresenceHandler.get_states)
    get_state = __func__(PresenceHandler.get_state)
    current_state_for_users = __func__(PresenceHandler.current_state_for_users)

    def user_syncing(self, user_id, affect_presence):
        """Record that a /sync for user_id is starting.

        Returns:
            Deferred[contextmanager]: a context manager whose exit marks the
            sync as finished (decrementing the in-flight count).
        """
        if affect_presence:
            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
            self.user_to_num_current_syncs[user_id] = curr_sync + 1

            # If we went from no in flight sync to some, notify replication
            if self.user_to_num_current_syncs[user_id] == 1:
                self.mark_as_coming_online(user_id)

        def _end():
            # We check that the user_id is in user_to_num_current_syncs because
            # user_to_num_current_syncs may have been cleared if we are
            # shutting down.
            if affect_presence and user_id in self.user_to_num_current_syncs:
                self.user_to_num_current_syncs[user_id] -= 1

                # If we went from one in flight sync to non, notify replication
                if self.user_to_num_current_syncs[user_id] == 0:
                    self.mark_as_going_offline(user_id)

        @contextlib.contextmanager
        def _user_syncing():
            try:
                yield
            finally:
                _end()

        return defer.succeed(_user_syncing())

    @defer.inlineCallbacks
    def notify_from_replication(self, states, stream_id):
        """Wake local listeners (rooms and users) interested in the given
        presence states."""
        parties = yield get_interested_parties(self.store, states)
        room_ids_to_states, users_to_states = parties

        self.notifier.on_new_event(
            "presence_key",
            stream_id,
            rooms=room_ids_to_states.keys(),
            users=users_to_states.keys(),
        )

    @defer.inlineCallbacks
    def process_replication_rows(self, token, rows):
        """Apply presence rows received from the master and notify listeners."""
        states = [
            UserPresenceState(
                row.user_id,
                row.state,
                row.last_active_ts,
                row.last_federation_update_ts,
                row.last_user_sync_ts,
                row.status_msg,
                row.currently_active,
            )
            for row in rows
        ]

        # Keep the local mirror of each user's latest presence state current.
        for state in states:
            self.user_to_current_state[state.user_id] = state

        stream_id = token
        yield self.notify_from_replication(states, stream_id)

    def get_currently_syncing_users(self):
        """Users with at least one in-flight /sync on this worker.

        NOTE(review): returns a list when presence is enabled but a set()
        when disabled — callers should only rely on iterability.
        """
        if self.hs.config.use_presence:
            return [
                user_id
                for user_id, count in iteritems(self.user_to_num_current_syncs)
                if count > 0
            ]
        else:
            return set()
|
2016-06-03 06:57:26 -04:00
|
|
|
|
|
|
|
|
|
|
|
class SynchrotronTyping(object):
    """Minimal typing-stream cache for the synchrotron worker.

    Holds the most recent typing stream token received from the master,
    plus a per-room serial and a per-room collection of users currently
    typing.
    """

    def __init__(self, hs):
        self._latest_room_serial = 0
        self._reset()

    def _reset(self):
        """
        Reset the typing handler's data caches.
        """
        # room_id -> serial number, and room_id -> users currently typing.
        self._room_serials, self._room_typing = {}, {}

    def stream_positions(self):
        # We must update this typing token from the response of the previous
        # sync. In particular, the stream id may "reset" back to zero/a low
        # value which we *must* use for the next replication request.
        return {"typing": self._latest_room_serial}

    def process_replication_rows(self, token, rows):
        if token < self._latest_room_serial:
            # The master has gone backwards. To prevent inconsistent data, just
            # clear everything.
            self._reset()

        # Set the latest serial token to whatever the server gave us.
        self._latest_room_serial = token

        for typing_row in rows:
            self._room_serials[typing_row.room_id] = token
            self._room_typing[typing_row.room_id] = typing_row.user_ids
|
2016-06-03 06:57:26 -04:00
|
|
|
|
|
|
|
|
|
|
|
class SynchrotronApplicationService(object):
    """No-op application-service handler.

    The synchrotron never pushes events to application services, so this
    stub swallows the notification.
    """

    def notify_interested_services(self, event):
        pass
|
|
|
|
|
|
|
|
|
|
|
|
class SynchrotronServer(HomeServer):
    """HomeServer variant for the synchrotron worker.

    Serves the sync-related client endpoints and substitutes the worker
    stand-ins (SynchrotronPresence/SynchrotronTyping/SyncReplicationHandler)
    for the master's handlers.
    """

    # Use the slaved composite store instead of the full master datastore.
    DATASTORE_CLASS = SynchrotronSlavedStore

    def _listen_http(self, listener_config):
        """Start one HTTP listener described by listener_config.

        Mounts the resources named in the config: "metrics" and/or "client"
        (the sync, events and initial-sync servlets).
        """
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    # Only the servlets a synchrotron needs: /sync, /events
                    # and the initial-sync endpoints.
                    sync.register_servlets(self, resource)
                    events.register_servlets(self, resource)
                    InitialSyncRestServlet(self).register(resource)
                    RoomInitialSyncRestServlet(self).register(resource)
                    # The same resource serves every client API version prefix.
                    resources.update(
                        {
                            "/_matrix/client/r0": resource,
                            "/_matrix/client/unstable": resource,
                            "/_matrix/client/v2_alpha": resource,
                            "/_matrix/client/api/v1": resource,
                        }
                    )

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
        )

        logger.info("Synapse synchrotron now listening on port %d", port)

    def start_listening(self, listeners):
        """Start every configured listener (http/manhole/metrics), then kick
        off TCP replication from the master."""
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warn(
                        (
                            "Metrics listener configured, but "
                            "enable_metrics is not True!"
                        )
                    )
                else:
                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
            else:
                # Unknown types are logged and skipped rather than fatal.
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return SyncReplicationHandler(self)

    def build_presence_handler(self):
        return SynchrotronPresence(self)

    def build_typing_handler(self):
        return SynchrotronTyping(self)
|
2017-01-27 08:36:39 -05:00
|
|
|
|
|
|
|
|
2017-03-27 11:33:44 -04:00
|
|
|
class SyncReplicationHandler(ReplicationClientHandler):
    """Receives replication traffic from the master.

    The parent class updates the slaved stores; this subclass additionally
    routes each batch of rows to the notifier and the worker's presence and
    typing stand-ins so that waiting /sync requests wake up.
    """

    def __init__(self, hs):
        super(SyncReplicationHandler, self).__init__(hs.get_datastore())

        self.store = hs.get_datastore()
        self.typing_handler = hs.get_typing_handler()
        # NB this is a SynchrotronPresence, not a normal PresenceHandler
        self.presence_handler = hs.get_presence_handler()
        self.notifier = hs.get_notifier()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        # Persist via the parent first, then notify in the background so we
        # don't block the replication connection on notification work.
        yield super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
        run_in_background(self.process_and_notify, stream_name, token, rows)

    def get_streams_to_replicate(self):
        args = super(SyncReplicationHandler, self).get_streams_to_replicate()
        # The typing position is tracked by the typing handler, not a slaved
        # store, because its token can legitimately go backwards.
        args.update(self.typing_handler.stream_positions())
        return args

    def get_currently_syncing_users(self):
        return self.presence_handler.get_currently_syncing_users()

    @defer.inlineCallbacks
    def process_and_notify(self, stream_name, token, rows):
        """Wake up anything waiting on the stream that just advanced.

        Args:
            stream_name (str): name of the replication stream the rows are for
            token (int): the new stream token
            rows (list): parsed row objects for this stream

        Any exception is logged rather than propagated, since this runs as a
        background task.
        """
        try:
            if stream_name == "events":
                # We shouldn't get multiple rows per token for events stream, so
                # we don't need to optimise this for multiple rows.
                for row in rows:
                    if row.type != EventsStreamEventRow.TypeId:
                        continue
                    event = yield self.store.get_event(row.data.event_id)
                    extra_users = ()
                    if event.type == EventTypes.Member:
                        # Make sure the affected member is woken even if they
                        # aren't otherwise interested in the room.
                        extra_users = (event.state_key,)
                    max_token = self.store.get_room_max_stream_ordering()
                    self.notifier.on_new_room_event(
                        event, token, max_token, extra_users
                    )
            elif stream_name == "push_rules":
                self.notifier.on_new_event(
                    "push_rules_key", token, users=[row.user_id for row in rows]
                )
            elif stream_name in ("account_data", "tag_account_data"):
                self.notifier.on_new_event(
                    "account_data_key", token, users=[row.user_id for row in rows]
                )
            elif stream_name == "receipts":
                self.notifier.on_new_event(
                    "receipt_key", token, rooms=[row.room_id for row in rows]
                )
            elif stream_name == "typing":
                self.typing_handler.process_replication_rows(token, rows)
                self.notifier.on_new_event(
                    "typing_key", token, rooms=[row.room_id for row in rows]
                )
            elif stream_name == "to_device":
                # Only user entities (mxids) are interesting; app service
                # tokens etc. are skipped.
                entities = [row.entity for row in rows if row.entity.startswith("@")]
                if entities:
                    self.notifier.on_new_event("to_device_key", token, users=entities)
            elif stream_name == "device_lists":
                all_room_ids = set()
                for row in rows:
                    room_ids = yield self.store.get_rooms_for_user(row.user_id)
                    all_room_ids.update(room_ids)
                self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids)
            elif stream_name == "presence":
                yield self.presence_handler.process_replication_rows(token, rows)
            elif stream_name == "groups":
                # BUGFIX: this branch previously tested `stream_name ==
                # "receipts"` a second time, which is unreachable (receipts is
                # handled above), so "groups_key" notifications were never
                # delivered.  The body (user_id rows, groups_key) clearly
                # belongs to the groups stream.
                self.notifier.on_new_event(
                    "groups_key", token, users=[row.user_id for row in rows]
                )
        except Exception:
            logger.exception("Error processing replication")
|
2016-06-03 06:57:26 -04:00
|
|
|
|
2016-06-16 07:53:15 -04:00
|
|
|
|
2016-06-16 12:29:50 -04:00
|
|
|
def start(config_options):
    """Parse config and boot the synchrotron worker.

    Args:
        config_options (list[str]): command-line arguments (typically
            sys.argv[1:]) passed to the config loader.

    Exits with status 1 (after writing to stderr) on configuration errors.
    """
    try:
        config = HomeServerConfig.load_config("Synapse synchrotron", config_options)
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    # Refuse to run with a config meant for a different worker type.
    assert config.worker_app == "synapse.app.synchrotron"

    setup_logging(config, use_worker_options=True)

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    ss = SynchrotronServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
        # App services are handled by the master; install the no-op stub.
        application_service_handler=SynchrotronApplicationService(),
    )

    ss.setup()
    # Defer listener startup until the reactor is running.
    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)

    # Blocks: runs the twisted reactor until shutdown.
    _base.start_worker_reactor("synapse-synchrotron", config)
|
2016-06-03 06:57:26 -04:00
|
|
|
|
|
|
|
|
2019-06-20 05:32:02 -04:00
|
|
|
if __name__ == "__main__":
    # Establish a root logging context so log lines from startup are
    # attributed correctly.
    with LoggingContext("main"):
        start(sys.argv[1:])
|