Merge branch 'develop' into rav/url_preview_limit_title

Commit 55a7da247a

231 changed files with 4527 additions and 1822 deletions

@@ -497,7 +497,7 @@ class Auth(object):
         token = self.get_access_token_from_request(request)
         service = self.store.get_app_service_by_token(token)
         if not service:
-            logger.warn("Unrecognised appservice access token.")
+            logger.warning("Unrecognised appservice access token.")
            raise InvalidClientTokenError()
         request.authenticated_entity = service.sender
         return defer.succeed(service)

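For context on the change that recurs throughout this commit: `Logger.warn` is a deprecated alias of `Logger.warning` in the Python standard library's logging module, which is why every call site is being converted. A minimal illustration (not part of the diff):

    import logging

    logger = logging.getLogger(__name__)

    logger.warning("Unrecognised appservice access token.")  # preferred spelling
    logger.warn("Unrecognised appservice access token.")     # deprecated alias
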
@@ -138,3 +138,10 @@ class LimitBlockingTypes(object):

     MONTHLY_ACTIVE_USER = "monthly_active_user"
     HS_DISABLED = "hs_disabled"
+
+
+class EventContentFields(object):
+    """Fields found in events' content, regardless of type."""
+
+    # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326
+    LABELS = "org.matrix.labels"

@@ -20,6 +20,7 @@ from jsonschema import FormatChecker

 from twisted.internet import defer

+from synapse.api.constants import EventContentFields
 from synapse.api.errors import SynapseError
 from synapse.storage.presence import UserPresenceState
 from synapse.types import RoomID, UserID

@@ -66,6 +67,10 @@ ROOM_EVENT_FILTER_SCHEMA = {
         "contains_url": {"type": "boolean"},
         "lazy_load_members": {"type": "boolean"},
         "include_redundant_members": {"type": "boolean"},
+        # Include or exclude events with the provided labels.
+        # cf https://github.com/matrix-org/matrix-doc/pull/2326
+        "org.matrix.labels": {"type": "array", "items": {"type": "string"}},
+        "org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
     },
 }

@@ -259,6 +264,9 @@ class Filter(object):

         self.contains_url = self.filter_json.get("contains_url", None)

+        self.labels = self.filter_json.get("org.matrix.labels", None)
+        self.not_labels = self.filter_json.get("org.matrix.not_labels", [])
+
     def filters_all_types(self):
         return "*" in self.not_types

@@ -282,6 +290,7 @@ class Filter(object):
             room_id = None
             ev_type = "m.presence"
             contains_url = False
+            labels = []
         else:
             sender = event.get("sender", None)
             if not sender:

@@ -300,10 +309,11 @@ class Filter(object):
             content = event.get("content", {})
             # check if there is a string url field in the content for filtering purposes
             contains_url = isinstance(content.get("url"), text_type)
+            labels = content.get(EventContentFields.LABELS, [])

-        return self.check_fields(room_id, sender, ev_type, contains_url)
+        return self.check_fields(room_id, sender, ev_type, labels, contains_url)

-    def check_fields(self, room_id, sender, event_type, contains_url):
+    def check_fields(self, room_id, sender, event_type, labels, contains_url):
         """Checks whether the filter matches the given event fields.

         Returns:

@@ -313,6 +323,7 @@ class Filter(object):
             "rooms": lambda v: room_id == v,
             "senders": lambda v: sender == v,
             "types": lambda v: _matches_wildcard(event_type, v),
+            "labels": lambda v: v in labels,
         }

         for name, match_func in literal_keys.items():

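Taken together, the filtering hunks wire MSC2326 label filtering through `Filter`: events carry labels under the `org.matrix.labels` content field, and filters can require or exclude label values. A rough sketch of the resulting behaviour, with hypothetical stand-in values (the real code dispatches through the `literal_keys` lambdas above):

    # Hypothetical values, for illustration only:
    labels = ["#fun"]            # labels on the event (EventContentFields.LABELS)
    wanted = ["#fun", "#games"]  # the filter's "org.matrix.labels"
    unwanted = ["#work"]         # the filter's "org.matrix.not_labels"

    # An event passes if some wanted label is present and no unwanted label is:
    passes = any(v in labels for v in wanted) and not any(v in labels for v in unwanted)
    assert passes
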
@@ -44,6 +44,8 @@ def check_bind_error(e, address, bind_addresses):
         bind_addresses (list): Addresses on which the service listens.
     """
     if address == "0.0.0.0" and "::" in bind_addresses:
-        logger.warn("Failed to listen on 0.0.0.0, continuing because listening on [::]")
+        logger.warning(
+            "Failed to listen on 0.0.0.0, continuing because listening on [::]"
+        )
     else:
         raise e

@@ -94,7 +94,7 @@ class AppserviceServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"

@@ -103,7 +103,7 @@ class AppserviceServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -153,7 +153,7 @@ class ClientReaderServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"

@@ -162,7 +162,7 @@ class ClientReaderServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -147,7 +147,7 @@ class EventCreatorServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"

@@ -156,7 +156,7 @@ class EventCreatorServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -132,7 +132,7 @@ class FederationReaderServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"

@@ -141,7 +141,7 @@ class FederationReaderServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -123,7 +123,7 @@ class FederationSenderServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"

@@ -132,7 +132,7 @@ class FederationSenderServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -204,7 +204,7 @@ class FrontendProxyServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"

@@ -213,7 +213,7 @@ class FrontendProxyServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -19,12 +19,13 @@ from __future__ import print_function

 import gc
 import logging
+import math
 import os
+import resource
 import sys

 from six import iteritems

-import psutil
 from prometheus_client import Gauge

 from twisted.application import service

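These import changes accompany the stats refactor below: the psutil dependency is dropped in favour of the standard library's resource module (plus math for the CPU average). For reference, a standalone reading of the same counters; note the unit caveat, which matters for the "Get RSS in bytes" comment later in the diff:

    import resource

    usage = resource.getrusage(resource.RUSAGE_SELF)
    rss = usage.ru_maxrss                   # peak RSS: kilobytes on Linux, bytes on macOS
    cpu = usage.ru_utime + usage.ru_stime   # user + system CPU seconds
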
@@ -282,7 +283,7 @@ class SynapseHomeServer(HomeServer):
                     reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"

@@ -291,7 +292,7 @@ class SynapseHomeServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])

     def run_startup_checks(self, db_conn, database_engine):
         all_users_native = are_all_users_on_domain(

@@ -471,6 +472,87 @@ class SynapseService(service.Service):
         return self._port.stopListening()


+# Contains the list of processes we will be monitoring
+# currently either 0 or 1
+_stats_process = []
+
+
+@defer.inlineCallbacks
+def phone_stats_home(hs, stats, stats_process=_stats_process):
+    logger.info("Gathering stats for reporting")
+    now = int(hs.get_clock().time())
+    uptime = int(now - hs.start_time)
+    if uptime < 0:
+        uptime = 0
+
+    stats["homeserver"] = hs.config.server_name
+    stats["server_context"] = hs.config.server_context
+    stats["timestamp"] = now
+    stats["uptime_seconds"] = uptime
+    version = sys.version_info
+    stats["python_version"] = "{}.{}.{}".format(
+        version.major, version.minor, version.micro
+    )
+    stats["total_users"] = yield hs.get_datastore().count_all_users()
+
+    total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
+    stats["total_nonbridged_users"] = total_nonbridged_users
+
+    daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
+    for name, count in iteritems(daily_user_type_results):
+        stats["daily_user_type_" + name] = count
+
+    room_count = yield hs.get_datastore().get_room_count()
+    stats["total_room_count"] = room_count
+
+    stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
+    stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
+    stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
+    stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
+
+    r30_results = yield hs.get_datastore().count_r30_users()
+    for name, count in iteritems(r30_results):
+        stats["r30_users_" + name] = count
+
+    daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
+    stats["daily_sent_messages"] = daily_sent_messages
+    stats["cache_factor"] = CACHE_SIZE_FACTOR
+    stats["event_cache_size"] = hs.config.event_cache_size
+
+    #
+    # Performance statistics
+    #
+    old = stats_process[0]
+    new = (now, resource.getrusage(resource.RUSAGE_SELF))
+    stats_process[0] = new
+
+    # Get RSS in bytes
+    stats["memory_rss"] = new[1].ru_maxrss
+
+    # Get CPU time in % of a single core, not % of all cores
+    used_cpu_time = (new[1].ru_utime + new[1].ru_stime) - (
+        old[1].ru_utime + old[1].ru_stime
+    )
+    if used_cpu_time == 0 or new[0] == old[0]:
+        stats["cpu_average"] = 0
+    else:
+        stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100)
+
+    #
+    # Database version
+    #
+
+    stats["database_engine"] = hs.get_datastore().database_engine_name
+    stats["database_server_version"] = hs.get_datastore().get_server_version()
+    logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats))
+    try:
+        yield hs.get_proxied_http_client().put_json(
+            hs.config.report_stats_endpoint, stats
+        )
+    except Exception as e:
+        logger.warning("Error reporting stats: %s", e)
+
+
 def run(hs):
     PROFILE_SYNAPSE = False
     if PROFILE_SYNAPSE:

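The CPU figure above is a delta between two rusage samples, expressed as a percentage of one core. A worked example of the arithmetic, with hypothetical sample values:

    import math

    old_time, new_time = 1000.0, 1060.0  # wall-clock seconds at each sample
    old_cpu, new_cpu = 30.0, 45.0        # ru_utime + ru_stime at each sample

    used_cpu_time = new_cpu - old_cpu    # 15 CPU-seconds over 60 wall-seconds
    cpu_average = math.floor(used_cpu_time / (new_time - old_time) * 100)
    assert cpu_average == 25             # 25% of a single core
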
@@ -497,91 +579,19 @@ def run(hs):
         reactor.run = profile(reactor.run)

     clock = hs.get_clock()
-    start_time = clock.time()

     stats = {}

-    # Contains the list of processes we will be monitoring
-    # currently either 0 or 1
-    stats_process = []
+    def performance_stats_init():
+        _stats_process.clear()
+        _stats_process.append(
+            (int(hs.get_clock().time(), resource.getrusage(resource.RUSAGE_SELF)))
+        )

     def start_phone_stats_home():
-        return run_as_background_process("phone_stats_home", phone_stats_home)
-
-    @defer.inlineCallbacks
-    def phone_stats_home():
-        logger.info("Gathering stats for reporting")
-        now = int(hs.get_clock().time())
-        uptime = int(now - start_time)
-        if uptime < 0:
-            uptime = 0
-
-        stats["homeserver"] = hs.config.server_name
-        stats["server_context"] = hs.config.server_context
-        stats["timestamp"] = now
-        stats["uptime_seconds"] = uptime
-        version = sys.version_info
-        stats["python_version"] = "{}.{}.{}".format(
-            version.major, version.minor, version.micro
+        return run_as_background_process(
+            "phone_stats_home", phone_stats_home, hs, stats
         )
-        stats["total_users"] = yield hs.get_datastore().count_all_users()
-
-        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
-        stats["total_nonbridged_users"] = total_nonbridged_users
-
-        daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
-        for name, count in iteritems(daily_user_type_results):
-            stats["daily_user_type_" + name] = count
-
-        room_count = yield hs.get_datastore().get_room_count()
-        stats["total_room_count"] = room_count
-
-        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
-        stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
-        stats[
-            "daily_active_rooms"
-        ] = yield hs.get_datastore().count_daily_active_rooms()
-        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
-
-        r30_results = yield hs.get_datastore().count_r30_users()
-        for name, count in iteritems(r30_results):
-            stats["r30_users_" + name] = count
-
-        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
-        stats["daily_sent_messages"] = daily_sent_messages
-        stats["cache_factor"] = CACHE_SIZE_FACTOR
-        stats["event_cache_size"] = hs.config.event_cache_size
-
-        if len(stats_process) > 0:
-            stats["memory_rss"] = 0
-            stats["cpu_average"] = 0
-            for process in stats_process:
-                stats["memory_rss"] += process.memory_info().rss
-                stats["cpu_average"] += int(process.cpu_percent(interval=None))
-
-        stats["database_engine"] = hs.get_datastore().database_engine_name
-        stats["database_server_version"] = hs.get_datastore().get_server_version()
-        logger.info(
-            "Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)
-        )
-        try:
-            yield hs.get_simple_http_client().put_json(
-                hs.config.report_stats_endpoint, stats
-            )
-        except Exception as e:
-            logger.warn("Error reporting stats: %s", e)
-
-    def performance_stats_init():
-        try:
-            process = psutil.Process()
-            # Ensure we can fetch both, and make the initial request for cpu_percent
-            # so the next request will use this as the initial point.
-            process.memory_info().rss
-            process.cpu_percent(interval=None)
-            logger.info("report_stats can use psutil")
-            stats_process.append(process)
-        except (AttributeError):
-            logger.warning("Unable to read memory/cpu stats. Disabling reporting.")

     def generate_user_daily_visit_stats():
         return run_as_background_process(

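One detail worth noting in the new `performance_stats_init`: as written in the diff, the closing parenthesis after `time()` is misplaced, so `resource.getrusage(...)` is passed as the second argument to `int()` rather than stored as the second tuple element. The presumably intended form is:

    _stats_process.append(
        (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF))
    )
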
@@ -626,7 +636,7 @@ def run(hs):

     if hs.config.report_stats:
         logger.info("Scheduling stats reporting for 3 hour intervals")
-        clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000)
+        clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000, hs, stats)

         # We need to defer this init for the cases that we daemonize
         # otherwise the process ID we get is that of the non-daemon process

@@ -634,7 +644,7 @@ def run(hs):

         # We wait 5 minutes to send the first set of stats as the server can
         # be quite busy the first few minutes
-        clock.call_later(5 * 60, start_phone_stats_home)
+        clock.call_later(5 * 60, start_phone_stats_home, hs, stats)

     _base.start_reactor(
         "synapse-homeserver",

@@ -120,7 +120,7 @@ class MediaRepositoryServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"

@@ -129,7 +129,7 @@ class MediaRepositoryServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -114,7 +114,7 @@ class PusherServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"

@@ -123,7 +123,7 @@ class PusherServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -326,7 +326,7 @@ class SynchrotronServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"

@@ -335,7 +335,7 @@ class SynchrotronServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -150,7 +150,7 @@ class UserDirectoryServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"

@@ -159,7 +159,7 @@ class UserDirectoryServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -94,7 +94,9 @@ class ApplicationService(object):
         ip_range_whitelist=None,
     ):
         self.token = token
-        self.url = url
+        self.url = (
+            url.rstrip("/") if isinstance(url, str) else None
+        )  # url must not end with a slash
         self.hs_token = hs_token
         self.sender = sender
         self.server_name = hostname

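The URL normalisation guards against doubled slashes when endpoint paths are appended to the appservice base URL later. A quick illustration with a hypothetical registration value:

    url = "https://appservice.example.com/"  # hypothetical registration value
    base = url.rstrip("/") if isinstance(url, str) else None
    assert base == "https://appservice.example.com"
    # joining an endpoint no longer produces a double slash:
    assert base + "/_matrix/app/v1/ping" == "https://appservice.example.com/_matrix/app/v1/ping"
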
@@ -125,7 +125,7 @@ class KeyConfig(Config):

         # if neither trusted_key_servers nor perspectives are given, use the default.
         if "perspectives" not in config and "trusted_key_servers" not in config:
-            logger.warn(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
+            logger.warning(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
             key_servers = [{"server_name": "matrix.org"}]
         else:
             key_servers = config.get("trusted_key_servers", [])

@@ -156,7 +156,7 @@ class KeyConfig(Config):
         if not self.macaroon_secret_key:
             # Unfortunately, there are people out there that don't have this
             # set. Lets just be "nice" and derive one from their secret key.
-            logger.warn("Config is missing macaroon_secret_key")
+            logger.warning("Config is missing macaroon_secret_key")
             seed = bytes(self.signing_key[0])
             self.macaroon_secret_key = hashlib.sha256(seed).digest()

@@ -182,7 +182,7 @@ def _reload_stdlib_logging(*args, log_config=None):
     logger = logging.getLogger("")

     if not log_config:
-        logger.warn("Reloaded a blank config?")
+        logger.warning("Reloaded a blank config?")

     logging.config.dictConfig(log_config)

@@ -234,8 +234,8 @@ def setup_logging(

     # make sure that the first thing we log is a thing we can grep backwards
     # for
-    logging.warn("***** STARTING SERVER *****")
-    logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse))
+    logging.warning("***** STARTING SERVER *****")
+    logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse))
     logging.info("Server hostname: %s", config.server_name)

     return logger

@@ -300,7 +300,7 @@ class RegistrationConfig(Config):
         # If a delegate is specified, the config option public_baseurl must also be filled out.
         #
         account_threepid_delegates:
-            #email: https://example.com     # Delegate email sending to example.org
+            #email: https://example.com     # Delegate email sending to example.com
             #msisdn: http://localhost:8090  # Delegate SMS sending to this local process

         # Users who register on this homeserver will automatically be joined

@@ -125,9 +125,11 @@ def compute_event_signature(event_dict, signature_name, signing_key):
     redact_json = prune_event_dict(event_dict)
     redact_json.pop("age_ts", None)
     redact_json.pop("unsigned", None)
-    logger.debug("Signing event: %s", encode_canonical_json(redact_json))
+    if logger.isEnabledFor(logging.DEBUG):
+        logger.debug("Signing event: %s", encode_canonical_json(redact_json))
     redact_json = sign_json(redact_json, signature_name, signing_key)
-    logger.debug("Signed event: %s", encode_canonical_json(redact_json))
+    if logger.isEnabledFor(logging.DEBUG):
+        logger.debug("Signed event: %s", encode_canonical_json(redact_json))
     return redact_json["signatures"]

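The `isEnabledFor` guards matter because, while %-style logging defers string formatting, the arguments are still evaluated eagerly: `encode_canonical_json` would run for every event even with DEBUG disabled. The general pattern, with a stand-in for the expensive call:

    import logging

    logger = logging.getLogger(__name__)

    def expensive_render(obj):
        # stands in for encode_canonical_json; imagine this is costly
        return repr(obj)

    event = {"type": "m.room.message"}
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Signing event: %s", expensive_render(event))
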
@@ -77,7 +77,7 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
     if auth_events is None:
         # Oh, we don't know what the state of the room was, so we
         # are trusting that this is allowed (at least for now)
-        logger.warn("Trusting event: %s", event.event_id)
+        logger.warning("Trusting event: %s", event.event_id)
         return

     if event.type == EventTypes.Create:

@@ -12,104 +12,103 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Dict, Optional, Tuple, Union

 from six import iteritems

+import attr
 from frozendict import frozendict

 from twisted.internet import defer

+from synapse.appservice import ApplicationService
 from synapse.logging.context import make_deferred_yieldable, run_in_background


-class EventContext(object):
+@attr.s(slots=True)
+class EventContext:
     """
     Holds information relevant to persisting an event

     Attributes:
-        state_group (int|None): state group id, if the state has been stored
-            as a state group. This is usually only None if e.g. the event is
-            an outlier.
-        rejected (bool|str): A rejection reason if the event was rejected, else
-            False
+        rejected: A rejection reason if the event was rejected, else False

-        push_actions (list[(str, list[object])]): list of (user_id, actions)
-            tuples
+        state_group: The ID of the state group for this event. Note that state events
+            are persisted with a state group which includes the new event, so this is
+            effectively the state *after* the event in question.

-        prev_group (int): Previously persisted state group. ``None`` for an
-            outlier.
-        delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
-            (type, state_key) -> event_id. ``None`` for an outlier.
+            For a *rejected* state event, where the state of the rejected event is
+            ignored, this state_group should never make it into the
+            event_to_state_groups table. Indeed, inspecting this value for a rejected
+            state event is almost certainly incorrect.

-        prev_state_events (?): XXX: is this ever set to anything other than
-            the empty list?
+            For an outlier, where we don't have the state at the event, this will be
+            None.

+        prev_group: If it is known, ``state_group``'s prev_group. Note that this being
+            None does not necessarily mean that ``state_group`` does not have
+            a prev_group!
+
+            If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
+            will always also be ``None``.
+
+            Note that this *not* (necessarily) the state group associated with
+            ``_prev_state_ids``.
+
+        delta_ids: If ``prev_group`` is not None, the state delta between ``prev_group``
+            and ``state_group``.
+
+        app_service: If this event is being sent by a (local) application service, that
+            app service.
+
+        _current_state_ids: The room state map, including this event - ie, the state
+            in ``state_group``.

-        _current_state_ids (dict[(str, str), str]|None):
-            The current state map including the current event. None if outlier
-            or we haven't fetched the state from DB yet.
             (type, state_key) -> event_id

-        _prev_state_ids (dict[(str, str), str]|None):
-            The current state map excluding the current event. None if outlier
-            or we haven't fetched the state from DB yet.
+            FIXME: what is this for an outlier? it seems ill-defined. It seems like
+            it could be either {}, or the state we were given by the remote
+            server, depending on $THINGS
+
+            Note that this is a private attribute: it should be accessed via
+            ``get_current_state_ids``. _AsyncEventContext impl calculates this
+            on-demand: it will be None until that happens.
+
+        _prev_state_ids: The room state map, excluding this event. For a non-state
+            event, this will be the same as _current_state_events.
+
+            Note that it is a completely different thing to prev_group!
+
             (type, state_key) -> event_id

-        _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
-            been calculated. None if we haven't started calculating yet
+            FIXME: again, what is this for an outlier?

-        _event_type (str): The type of the event the context is associated with.
-            Only set when state has not been fetched yet.
-
-        _event_state_key (str|None): The state_key of the event the context is
-            associated with. Only set when state has not been fetched yet.
-
-        _prev_state_id (str|None): If the event associated with the context is
-            a state event, then `_prev_state_id` is the event_id of the state
-            that was replaced.
-            Only set when state has not been fetched yet.
+            As with _current_state_ids, this is a private attribute. It should be
+            accessed via get_prev_state_ids.
     """

-    __slots__ = [
-        "state_group",
-        "rejected",
-        "prev_group",
-        "delta_ids",
-        "prev_state_events",
-        "app_service",
-        "_current_state_ids",
-        "_prev_state_ids",
-        "_prev_state_id",
-        "_event_type",
-        "_event_state_key",
-        "_fetching_state_deferred",
-    ]
+    rejected = attr.ib(default=False, type=Union[bool, str])
+    state_group = attr.ib(default=None, type=Optional[int])
+    prev_group = attr.ib(default=None, type=Optional[int])
+    delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
+    app_service = attr.ib(default=None, type=Optional[ApplicationService])

-    def __init__(self):
-        self.prev_state_events = []
-        self.rejected = False
-        self.app_service = None
+    _current_state_ids = attr.ib(
+        default=None, type=Optional[Dict[Tuple[str, str], str]]
+    )
+    _prev_state_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])

     @staticmethod
     def with_state(
         state_group, current_state_ids, prev_state_ids, prev_group=None, delta_ids=None
     ):
-        context = EventContext()
-
-        # The current state including the current event
-        context._current_state_ids = current_state_ids
-        # The current state excluding the current event
-        context._prev_state_ids = prev_state_ids
-        context.state_group = state_group
-
-        context._prev_state_id = None
-        context._event_type = None
-        context._event_state_key = None
-        context._fetching_state_deferred = defer.succeed(None)
-
-        # A previously persisted state group and a delta between that
-        # and this state.
-        context.prev_group = prev_group
-        context.delta_ids = delta_ids
-
-        return context
+        return EventContext(
+            current_state_ids=current_state_ids,
+            prev_state_ids=prev_state_ids,
+            state_group=state_group,
+            prev_group=prev_group,
+            delta_ids=delta_ids,
+        )

     @defer.inlineCallbacks
     def serialize(self, event, store):

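For readers unfamiliar with the idiom this hunk adopts: `@attr.s(slots=True)` generates `__init__`, `__slots__`, `__repr__` and comparison methods from the `attr.ib` declarations, replacing the hand-maintained `__slots__` list and `__init__`. attrs also strips a single leading underscore when naming init parameters, which is why `_current_state_ids` is passed as `current_state_ids` in `with_state`. A minimal standalone example:

    import attr

    @attr.s(slots=True)
    class Example:
        rejected = attr.ib(default=False)
        _current_state_ids = attr.ib(default=None)  # private attr, public init arg

    e = Example(rejected=True, current_state_ids={})
    assert e._current_state_ids == {}
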
@@ -141,7 +140,6 @@ class EventContext(object):
             "rejected": self.rejected,
             "prev_group": self.prev_group,
             "delta_ids": _encode_state_dict(self.delta_ids),
-            "prev_state_events": self.prev_state_events,
             "app_service_id": self.app_service.id if self.app_service else None,
         }

@@ -157,24 +155,17 @@ class EventContext(object):
         Returns:
             EventContext
         """
-        context = EventContext()
-
-        # We use the state_group and prev_state_id stuff to pull the
-        # current_state_ids out of the DB and construct prev_state_ids.
-        context._prev_state_id = input["prev_state_id"]
-        context._event_type = input["event_type"]
-        context._event_state_key = input["event_state_key"]
-
-        context._current_state_ids = None
-        context._prev_state_ids = None
-        context._fetching_state_deferred = None
-
-        context.state_group = input["state_group"]
-        context.prev_group = input["prev_group"]
-        context.delta_ids = _decode_state_dict(input["delta_ids"])
-
-        context.rejected = input["rejected"]
-        context.prev_state_events = input["prev_state_events"]
+        context = _AsyncEventContextImpl(
+            # We use the state_group and prev_state_id stuff to pull the
+            # current_state_ids out of the DB and construct prev_state_ids.
+            prev_state_id=input["prev_state_id"],
+            event_type=input["event_type"],
+            event_state_key=input["event_state_key"],
+            state_group=input["state_group"],
+            prev_group=input["prev_group"],
+            delta_ids=_decode_state_dict(input["delta_ids"]),
+            rejected=input["rejected"],
+        )

         app_service_id = input["app_service_id"]
         if app_service_id:

@@ -192,14 +183,7 @@ class EventContext(object):
             Maps a (type, state_key) to the event ID of the state event matching
             this tuple.
         """
-
-        if not self._fetching_state_deferred:
-            self._fetching_state_deferred = run_in_background(
-                self._fill_out_state, store
-            )
-
-        yield make_deferred_yieldable(self._fetching_state_deferred)
-
+        yield self._ensure_fetched(store)
         return self._current_state_ids

     @defer.inlineCallbacks

@@ -212,14 +196,7 @@ class EventContext(object):
             Maps a (type, state_key) to the event ID of the state event matching
             this tuple.
         """
-
-        if not self._fetching_state_deferred:
-            self._fetching_state_deferred = run_in_background(
-                self._fill_out_state, store
-            )
-
-        yield make_deferred_yieldable(self._fetching_state_deferred)
-
+        yield self._ensure_fetched(store)
         return self._prev_state_ids

     def get_cached_current_state_ids(self):

@@ -233,6 +210,44 @@ class EventContext(object):

         return self._current_state_ids

+    def _ensure_fetched(self, store):
+        return defer.succeed(None)
+
+
+@attr.s(slots=True)
+class _AsyncEventContextImpl(EventContext):
+    """
+    An implementation of EventContext which fetches _current_state_ids and
+    _prev_state_ids from the database on demand.
+
+    Attributes:
+
+        _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
+            been calculated. None if we haven't started calculating yet
+
+        _event_type (str): The type of the event the context is associated with.
+
+        _event_state_key (str): The state_key of the event the context is
+            associated with.
+
+        _prev_state_id (str|None): If the event associated with the context is
+            a state event, then `_prev_state_id` is the event_id of the state
+            that was replaced.
+    """
+
+    _prev_state_id = attr.ib(default=None)
+    _event_type = attr.ib(default=None)
+    _event_state_key = attr.ib(default=None)
+    _fetching_state_deferred = attr.ib(default=None)
+
+    def _ensure_fetched(self, store):
+        if not self._fetching_state_deferred:
+            self._fetching_state_deferred = run_in_background(
+                self._fill_out_state, store
+            )
+
+        return make_deferred_yieldable(self._fetching_state_deferred)
+
     @defer.inlineCallbacks
     def _fill_out_state(self, store):
         """Called to populate the _current_state_ids and _prev_state_ids

|
|||
else:
|
||||
self._prev_state_ids = self._current_state_ids
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def update_state(
|
||||
self, state_group, prev_state_ids, current_state_ids, prev_group, delta_ids
|
||||
):
|
||||
"""Replace the state in the context
|
||||
"""
|
||||
|
||||
# We need to make sure we wait for any ongoing fetching of state
|
||||
# to complete so that the updated state doesn't get clobbered
|
||||
if self._fetching_state_deferred:
|
||||
yield make_deferred_yieldable(self._fetching_state_deferred)
|
||||
|
||||
self.state_group = state_group
|
||||
self._prev_state_ids = prev_state_ids
|
||||
self.prev_group = prev_group
|
||||
self._current_state_ids = current_state_ids
|
||||
self.delta_ids = delta_ids
|
||||
|
||||
# We need to ensure that that we've marked as having fetched the state
|
||||
self._fetching_state_deferred = defer.succeed(None)
|
||||
|
||||
|
||||
def _encode_state_dict(state_dict):
|
||||
"""Since dicts of (type, state_key) -> event_id cannot be serialized in
|
||||
|
|
|
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2017 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -13,6 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import inspect
+
+from synapse.spam_checker_api import SpamCheckerApi
+

 class SpamChecker(object):
     def __init__(self, hs):

@@ -26,7 +31,14 @@ class SpamChecker(object):
             pass

         if module is not None:
-            self.spam_checker = module(config=config)
+            # Older spam checkers don't accept the `api` argument, so we
+            # try and detect support.
+            spam_args = inspect.getfullargspec(module)
+            if "api" in spam_args.args:
+                api = SpamCheckerApi(hs)
+                self.spam_checker = module(config=config, api=api)
+            else:
+                self.spam_checker = module(config=config)

     def check_event_for_spam(self, event):
         """Checks if a given event is considered "spammy" by this server.

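The backwards-compatibility probe works because, on Python 3, `inspect.getfullargspec` applied to a class reports the parameters of its `__init__`. A sketch of the detection logic with two hypothetical checker classes:

    import inspect

    class OldChecker:
        def __init__(self, config):
            self.config = config

    class NewChecker:
        def __init__(self, config, api):
            self.config, self.api = config, api

    assert "api" not in inspect.getfullargspec(OldChecker).args
    assert "api" in inspect.getfullargspec(NewChecker).args
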
|
@ -102,7 +102,7 @@ class FederationBase(object):
|
|||
pass
|
||||
|
||||
if not res:
|
||||
logger.warn(
|
||||
logger.warning(
|
||||
"Failed to find copy of %s with valid signature", pdu.event_id
|
||||
)
|
||||
|
||||
|
@@ -173,7 +173,7 @@ class FederationBase(object):
             return redacted_event

         if self.spam_checker.check_event_for_spam(pdu):
-            logger.warn(
+            logger.warning(
                 "Event contains spam, redacting %s: %s",
                 pdu.event_id,
                 pdu.get_pdu_json(),

@@ -185,7 +185,7 @@ class FederationBase(object):
         def errback(failure, pdu):
             failure.trap(SynapseError)
             with PreserveLoggingContext(ctx):
-                logger.warn(
+                logger.warning(
                     "Signature check failed for %s: %s",
                     pdu.event_id,
                     failure.getErrorMessage(),

@@ -196,7 +196,7 @@ class FederationClient(FederationBase):
             dest, room_id, extremities, limit
         )

-        logger.debug("backfill transaction_data=%s", repr(transaction_data))
+        logger.debug("backfill transaction_data=%r", transaction_data)

         room_version = yield self.store.get_room_version(room_id)
         format_ver = room_version_to_event_format(room_version)

@@ -522,12 +522,12 @@ class FederationClient(FederationBase):
                 res = yield callback(destination)
                 return res
             except InvalidResponseError as e:
-                logger.warn("Failed to %s via %s: %s", description, destination, e)
+                logger.warning("Failed to %s via %s: %s", description, destination, e)
             except HttpResponseException as e:
                 if not 500 <= e.code < 600:
                     raise e.to_synapse_error()
                 else:
-                    logger.warn(
+                    logger.warning(
                         "Failed to %s via %s: %i %s",
                         description,
                         destination,

@@ -535,7 +535,9 @@ class FederationClient(FederationBase):
                         e.args[0],
                     )
             except Exception:
-                logger.warn("Failed to %s via %s", description, destination, exc_info=1)
+                logger.warning(
+                    "Failed to %s via %s", description, destination, exc_info=1
+                )

         raise SynapseError(502, "Failed to %s via any server" % (description,))

@@ -553,7 +555,7 @@ class FederationClient(FederationBase):
         Note that this does not append any events to any graphs.

         Args:
-            destinations (str): Candidate homeservers which are probably
+            destinations (Iterable[str]): Candidate homeservers which are probably
                 participating in the room.
             room_id (str): The room in which the event will happen.
             user_id (str): The user whose membership is being evented.

@@ -21,7 +21,6 @@ from six import iteritems
 from canonicaljson import json
 from prometheus_client import Counter

-from twisted.internet import defer
 from twisted.internet.abstract import isIPAddress
 from twisted.python import failure

@@ -86,14 +85,12 @@ class FederationServer(FederationBase):
         # come in waves.
         self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000)

-    @defer.inlineCallbacks
-    @log_function
-    def on_backfill_request(self, origin, room_id, versions, limit):
-        with (yield self._server_linearizer.queue((origin, room_id))):
+    async def on_backfill_request(self, origin, room_id, versions, limit):
+        with (await self._server_linearizer.queue((origin, room_id))):
             origin_host, _ = parse_server_name(origin)
-            yield self.check_server_matches_acl(origin_host, room_id)
+            await self.check_server_matches_acl(origin_host, room_id)

-            pdus = yield self.handler.on_backfill_request(
+            pdus = await self.handler.on_backfill_request(
                 origin, room_id, versions, limit
             )

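The remaining hunks in this file apply one mechanical transformation: `@defer.inlineCallbacks` generator methods become native coroutines, with `yield` on Deferreds replaced by `await`. Callers that still need a Deferred can wrap the coroutine with `defer.ensureDeferred`. A minimal before/after sketch with illustrative names:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def handle_old(store, room_id):
        version = yield store.get_room_version(room_id)
        return version

    async def handle_new(store, room_id):
        return await store.get_room_version(room_id)

    # e.g. d = defer.ensureDeferred(handle_new(store, room_id))
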
@@ -101,9 +98,7 @@ class FederationServer(FederationBase):

         return 200, res

-    @defer.inlineCallbacks
-    @log_function
-    def on_incoming_transaction(self, origin, transaction_data):
+    async def on_incoming_transaction(self, origin, transaction_data):
         # keep this as early as possible to make the calculated origin ts as
         # accurate as possible.
         request_time = self._clock.time_msec()

@@ -118,18 +113,17 @@ class FederationServer(FederationBase):
         # use a linearizer to ensure that we don't process the same transaction
         # multiple times in parallel.
         with (
-            yield self._transaction_linearizer.queue(
+            await self._transaction_linearizer.queue(
                 (origin, transaction.transaction_id)
             )
         ):
-            result = yield self._handle_incoming_transaction(
+            result = await self._handle_incoming_transaction(
                 origin, transaction, request_time
             )

         return result

-    @defer.inlineCallbacks
-    def _handle_incoming_transaction(self, origin, transaction, request_time):
+    async def _handle_incoming_transaction(self, origin, transaction, request_time):
         """ Process an incoming transaction and return the HTTP response

         Args:

@@ -140,7 +134,7 @@ class FederationServer(FederationBase):
         Returns:
             Deferred[(int, object)]: http response code and body
         """
-        response = yield self.transaction_actions.have_responded(origin, transaction)
+        response = await self.transaction_actions.have_responded(origin, transaction)

         if response:
             logger.debug(

@@ -151,7 +145,7 @@ class FederationServer(FederationBase):

         logger.debug("[%s] Transaction is new", transaction.transaction_id)

-        # Reject if PDU count > 50 and EDU count > 100
+        # Reject if PDU count > 50 or EDU count > 100
         if len(transaction.pdus) > 50 or (
             hasattr(transaction, "edus") and len(transaction.edus) > 100
         ):

@@ -159,7 +153,7 @@ class FederationServer(FederationBase):
             logger.info("Transaction PDU or EDU count too large. Returning 400")

             response = {}
-            yield self.transaction_actions.set_response(
+            await self.transaction_actions.set_response(
                 origin, transaction, 400, response
             )
             return 400, response

@@ -195,7 +189,7 @@ class FederationServer(FederationBase):
                 continue

             try:
-                room_version = yield self.store.get_room_version(room_id)
+                room_version = await self.store.get_room_version(room_id)
             except NotFoundError:
                 logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                 continue

@@ -221,13 +215,12 @@ class FederationServer(FederationBase):
         # require callouts to other servers to fetch missing events), but
         # impose a limit to avoid going too crazy with ram/cpu.

-        @defer.inlineCallbacks
-        def process_pdus_for_room(room_id):
+        async def process_pdus_for_room(room_id):
             logger.debug("Processing PDUs for %s", room_id)
             try:
-                yield self.check_server_matches_acl(origin_host, room_id)
+                await self.check_server_matches_acl(origin_host, room_id)
             except AuthError as e:
-                logger.warn("Ignoring PDUs for room %s from banned server", room_id)
+                logger.warning("Ignoring PDUs for room %s from banned server", room_id)
                 for pdu in pdus_by_room[room_id]:
                     event_id = pdu.event_id
                     pdu_results[event_id] = e.error_dict()

@@ -237,10 +230,10 @@ class FederationServer(FederationBase):
                 event_id = pdu.event_id
                 with nested_logging_context(event_id):
                     try:
-                        yield self._handle_received_pdu(origin, pdu)
+                        await self._handle_received_pdu(origin, pdu)
                         pdu_results[event_id] = {}
                     except FederationError as e:
-                        logger.warn("Error handling PDU %s: %s", event_id, e)
+                        logger.warning("Error handling PDU %s: %s", event_id, e)
                         pdu_results[event_id] = {"error": str(e)}
                     except Exception as e:
                         f = failure.Failure()

@@ -251,36 +244,33 @@ class FederationServer(FederationBase):
                         exc_info=(f.type, f.value, f.getTracebackObject()),
                     )

-        yield concurrently_execute(
+        await concurrently_execute(
             process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
         )

         if hasattr(transaction, "edus"):
             for edu in (Edu(**x) for x in transaction.edus):
-                yield self.received_edu(origin, edu.edu_type, edu.content)
+                await self.received_edu(origin, edu.edu_type, edu.content)

         response = {"pdus": pdu_results}

         logger.debug("Returning: %s", str(response))

-        yield self.transaction_actions.set_response(origin, transaction, 200, response)
+        await self.transaction_actions.set_response(origin, transaction, 200, response)
         return 200, response

-    @defer.inlineCallbacks
-    def received_edu(self, origin, edu_type, content):
+    async def received_edu(self, origin, edu_type, content):
         received_edus_counter.inc()
-        yield self.registry.on_edu(edu_type, origin, content)
+        await self.registry.on_edu(edu_type, origin, content)

-    @defer.inlineCallbacks
-    @log_function
-    def on_context_state_request(self, origin, room_id, event_id):
+    async def on_context_state_request(self, origin, room_id, event_id):
         if not event_id:
             raise NotImplementedError("Specify an event")

         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, room_id)
+        await self.check_server_matches_acl(origin_host, room_id)

-        in_room = yield self.auth.check_host_in_room(room_id, origin)
+        in_room = await self.auth.check_host_in_room(room_id, origin)
         if not in_room:
             raise AuthError(403, "Host not in room.")

@@ -289,8 +279,8 @@ class FederationServer(FederationBase):
         # in the cache so we could return it without waiting for the linearizer
         # - but that's non-trivial to get right, and anyway somewhat defeats
         # the point of the linearizer.
-        with (yield self._server_linearizer.queue((origin, room_id))):
-            resp = yield self._state_resp_cache.wrap(
+        with (await self._server_linearizer.queue((origin, room_id))):
+            resp = await self._state_resp_cache.wrap(
                 (room_id, event_id),
                 self._on_context_state_request_compute,
                 room_id,

@@ -299,65 +289,60 @@ class FederationServer(FederationBase):

         return 200, resp

-    @defer.inlineCallbacks
-    def on_state_ids_request(self, origin, room_id, event_id):
+    async def on_state_ids_request(self, origin, room_id, event_id):
         if not event_id:
             raise NotImplementedError("Specify an event")

         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, room_id)
+        await self.check_server_matches_acl(origin_host, room_id)

-        in_room = yield self.auth.check_host_in_room(room_id, origin)
+        in_room = await self.auth.check_host_in_room(room_id, origin)
         if not in_room:
             raise AuthError(403, "Host not in room.")

-        state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
-        auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
+        state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
+        auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)

         return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}

-    @defer.inlineCallbacks
-    def _on_context_state_request_compute(self, room_id, event_id):
-        pdus = yield self.handler.get_state_for_pdu(room_id, event_id)
-        auth_chain = yield self.store.get_auth_chain([pdu.event_id for pdu in pdus])
+    async def _on_context_state_request_compute(self, room_id, event_id):
+        pdus = await self.handler.get_state_for_pdu(room_id, event_id)
+        auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])

         return {
             "pdus": [pdu.get_pdu_json() for pdu in pdus],
             "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
         }

-    @defer.inlineCallbacks
-    @log_function
-    def on_pdu_request(self, origin, event_id):
-        pdu = yield self.handler.get_persisted_pdu(origin, event_id)
+    async def on_pdu_request(self, origin, event_id):
+        pdu = await self.handler.get_persisted_pdu(origin, event_id)

         if pdu:
             return 200, self._transaction_from_pdus([pdu]).get_dict()
         else:
             return 404, ""

-    @defer.inlineCallbacks
-    def on_query_request(self, query_type, args):
+    async def on_query_request(self, query_type, args):
         received_queries_counter.labels(query_type).inc()
-        resp = yield self.registry.on_query(query_type, args)
+        resp = await self.registry.on_query(query_type, args)
         return 200, resp

-    @defer.inlineCallbacks
-    def on_make_join_request(self, origin, room_id, user_id, supported_versions):
+    async def on_make_join_request(self, origin, room_id, user_id, supported_versions):
         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, room_id)
+        await self.check_server_matches_acl(origin_host, room_id)

-        room_version = yield self.store.get_room_version(room_id)
+        room_version = await self.store.get_room_version(room_id)
         if room_version not in supported_versions:
-            logger.warn("Room version %s not in %s", room_version, supported_versions)
+            logger.warning(
+                "Room version %s not in %s", room_version, supported_versions
+            )
             raise IncompatibleRoomVersionError(room_version=room_version)

-        pdu = yield self.handler.on_make_join_request(origin, room_id, user_id)
+        pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
         time_now = self._clock.time_msec()
         return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}

-    @defer.inlineCallbacks
-    def on_invite_request(self, origin, content, room_version):
+    async def on_invite_request(self, origin, content, room_version):
         if room_version not in KNOWN_ROOM_VERSIONS:
             raise SynapseError(
                 400,

@@ -369,28 +354,27 @@ class FederationServer(FederationBase):

         pdu = event_from_pdu_json(content, format_ver)
         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, pdu.room_id)
-        pdu = yield self._check_sigs_and_hash(room_version, pdu)
-        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
+        await self.check_server_matches_acl(origin_host, pdu.room_id)
+        pdu = await self._check_sigs_and_hash(room_version, pdu)
+        ret_pdu = await self.handler.on_invite_request(origin, pdu)
         time_now = self._clock.time_msec()
         return {"event": ret_pdu.get_pdu_json(time_now)}

-    @defer.inlineCallbacks
-    def on_send_join_request(self, origin, content, room_id):
+    async def on_send_join_request(self, origin, content, room_id):
         logger.debug("on_send_join_request: content: %s", content)

-        room_version = yield self.store.get_room_version(room_id)
+        room_version = await self.store.get_room_version(room_id)
         format_ver = room_version_to_event_format(room_version)
         pdu = event_from_pdu_json(content, format_ver)

         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, pdu.room_id)
+        await self.check_server_matches_acl(origin_host, pdu.room_id)

         logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)

-        pdu = yield self._check_sigs_and_hash(room_version, pdu)
+        pdu = await self._check_sigs_and_hash(room_version, pdu)

-        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
+        res_pdus = await self.handler.on_send_join_request(origin, pdu)
         time_now = self._clock.time_msec()
         return (
             200,

@@ -402,48 +386,44 @@ class FederationServer(FederationBase):
             },
         )

-    @defer.inlineCallbacks
-    def on_make_leave_request(self, origin, room_id, user_id):
+    async def on_make_leave_request(self, origin, room_id, user_id):
         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, room_id)
-        pdu = yield self.handler.on_make_leave_request(origin, room_id, user_id)
+        await self.check_server_matches_acl(origin_host, room_id)
+        pdu = await self.handler.on_make_leave_request(origin, room_id, user_id)

-        room_version = yield self.store.get_room_version(room_id)
+        room_version = await self.store.get_room_version(room_id)

         time_now = self._clock.time_msec()
         return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}

-    @defer.inlineCallbacks
-    def on_send_leave_request(self, origin, content, room_id):
+    async def on_send_leave_request(self, origin, content, room_id):
         logger.debug("on_send_leave_request: content: %s", content)

-        room_version = yield self.store.get_room_version(room_id)
+        room_version = await self.store.get_room_version(room_id)
         format_ver = room_version_to_event_format(room_version)
         pdu = event_from_pdu_json(content, format_ver)

         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, pdu.room_id)
+        await self.check_server_matches_acl(origin_host, pdu.room_id)

         logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)

-        pdu = yield self._check_sigs_and_hash(room_version, pdu)
+        pdu = await self._check_sigs_and_hash(room_version, pdu)

-        yield self.handler.on_send_leave_request(origin, pdu)
+        await self.handler.on_send_leave_request(origin, pdu)
         return 200, {}

-    @defer.inlineCallbacks
-    def on_event_auth(self, origin, room_id, event_id):
-        with (yield self._server_linearizer.queue((origin, room_id))):
+    async def on_event_auth(self, origin, room_id, event_id):
+        with (await self._server_linearizer.queue((origin, room_id))):
             origin_host, _ = parse_server_name(origin)
-            yield self.check_server_matches_acl(origin_host, room_id)
+            await self.check_server_matches_acl(origin_host, room_id)

             time_now = self._clock.time_msec()
-            auth_pdus = yield self.handler.on_event_auth(event_id)
+            auth_pdus = await self.handler.on_event_auth(event_id)
             res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
             return 200, res

-    @defer.inlineCallbacks
-    def on_query_auth_request(self, origin, content, room_id, event_id):
+    async def on_query_auth_request(self, origin, content, room_id, event_id):
         """
         Content is a dict with keys::
             auth_chain (list): A list of events that give the auth chain.

@@ -462,22 +442,22 @@ class FederationServer(FederationBase):
         Returns:
             Deferred: Results in `dict` with the same format as `content`
         """
-        with (yield self._server_linearizer.queue((origin, room_id))):
+        with (await self._server_linearizer.queue((origin, room_id))):
             origin_host, _ = parse_server_name(origin)
-            yield self.check_server_matches_acl(origin_host, room_id)
+            await self.check_server_matches_acl(origin_host, room_id)

-            room_version = yield self.store.get_room_version(room_id)
+            room_version = await self.store.get_room_version(room_id)
             format_ver = room_version_to_event_format(room_version)

             auth_chain = [
                 event_from_pdu_json(e, format_ver) for e in content["auth_chain"]
             ]

-            signed_auth = yield self._check_sigs_and_hash_and_fetch(
+            signed_auth = await self._check_sigs_and_hash_and_fetch(
                 origin, auth_chain, outlier=True, room_version=room_version
             )

-            ret = yield self.handler.on_query_auth(
+            ret = await self.handler.on_query_auth(
                 origin,
                 event_id,
                 room_id,

@ -503,16 +483,14 @@ class FederationServer(FederationBase):
|
|||
return self.on_query_request("user_devices", user_id)
|
||||
|
||||
@trace
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def on_claim_client_keys(self, origin, content):
|
||||
async def on_claim_client_keys(self, origin, content):
|
||||
query = []
|
||||
for user_id, device_keys in content.get("one_time_keys", {}).items():
|
||||
for device_id, algorithm in device_keys.items():
|
||||
query.append((user_id, device_id, algorithm))
|
||||
|
||||
log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
|
||||
results = yield self.store.claim_e2e_one_time_keys(query)
|
||||
results = await self.store.claim_e2e_one_time_keys(query)
|
||||
|
||||
json_result = {}
|
||||
for user_id, device_keys in results.items():
|
||||
|
@ -536,14 +514,12 @@ class FederationServer(FederationBase):
|
|||
|
||||
return {"one_time_keys": json_result}
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def on_get_missing_events(
|
||||
async def on_get_missing_events(
|
||||
self, origin, room_id, earliest_events, latest_events, limit
|
||||
):
|
||||
with (yield self._server_linearizer.queue((origin, room_id))):
|
||||
with (await self._server_linearizer.queue((origin, room_id))):
|
||||
origin_host, _ = parse_server_name(origin)
|
||||
yield self.check_server_matches_acl(origin_host, room_id)
|
||||
await self.check_server_matches_acl(origin_host, room_id)
|
||||
|
||||
logger.info(
|
||||
"on_get_missing_events: earliest_events: %r, latest_events: %r,"
|
||||
|
@ -553,7 +529,7 @@ class FederationServer(FederationBase):
|
|||
limit,
|
||||
)
|
||||
|
||||
missing_events = yield self.handler.on_get_missing_events(
|
||||
missing_events = await self.handler.on_get_missing_events(
|
||||
origin, room_id, earliest_events, latest_events, limit
|
||||
)
|
||||
|
||||
|
@ -586,8 +562,7 @@ class FederationServer(FederationBase):
|
|||
destination=None,
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _handle_received_pdu(self, origin, pdu):
|
||||
async def _handle_received_pdu(self, origin, pdu):
|
||||
""" Process a PDU received in a federation /send/ transaction.
|
||||
|
||||
If the event is invalid, then this method throws a FederationError.
|
||||
|
@ -640,37 +615,34 @@ class FederationServer(FederationBase):
|
|||
logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)
|
||||
|
||||
# We've already checked that we know the room version by this point
|
||||
room_version = yield self.store.get_room_version(pdu.room_id)
|
||||
room_version = await self.store.get_room_version(pdu.room_id)
|
||||
|
||||
# Check signature.
|
||||
try:
|
||||
pdu = yield self._check_sigs_and_hash(room_version, pdu)
|
||||
pdu = await self._check_sigs_and_hash(room_version, pdu)
|
||||
except SynapseError as e:
|
||||
raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)
|
||||
|
||||
yield self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
|
||||
await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
|
||||
|
||||
def __str__(self):
|
||||
return "<ReplicationLayer(%s)>" % self.server_name
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def exchange_third_party_invite(
|
||||
async def exchange_third_party_invite(
|
||||
self, sender_user_id, target_user_id, room_id, signed
|
||||
):
|
||||
ret = yield self.handler.exchange_third_party_invite(
|
||||
ret = await self.handler.exchange_third_party_invite(
|
||||
sender_user_id, target_user_id, room_id, signed
|
||||
)
|
||||
return ret
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_exchange_third_party_invite_request(self, room_id, event_dict):
|
||||
ret = yield self.handler.on_exchange_third_party_invite_request(
|
||||
async def on_exchange_third_party_invite_request(self, room_id, event_dict):
|
||||
ret = await self.handler.on_exchange_third_party_invite_request(
|
||||
room_id, event_dict
|
||||
)
|
||||
return ret
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_server_matches_acl(self, server_name, room_id):
|
||||
async def check_server_matches_acl(self, server_name, room_id):
|
||||
"""Check if the given server is allowed by the server ACLs in the room
|
||||
|
||||
Args:
|
||||
|
@ -680,13 +652,13 @@ class FederationServer(FederationBase):
|
|||
Raises:
|
||||
AuthError if the server does not match the ACL
|
||||
"""
|
||||
state_ids = yield self.store.get_current_state_ids(room_id)
|
||||
state_ids = await self.store.get_current_state_ids(room_id)
|
||||
acl_event_id = state_ids.get((EventTypes.ServerACL, ""))
|
||||
|
||||
if not acl_event_id:
|
||||
return
|
||||
|
||||
acl_event = yield self.store.get_event(acl_event_id)
|
||||
acl_event = await self.store.get_event(acl_event_id)
|
||||
if server_matches_acl_event(server_name, acl_event):
|
||||
return
|
||||
|
||||
|
@ -709,7 +681,7 @@ def server_matches_acl_event(server_name, acl_event):
|
|||
# server name is a literal IP
|
||||
allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
|
||||
if not isinstance(allow_ip_literals, bool):
|
||||
logger.warn("Ignorning non-bool allow_ip_literals flag")
|
||||
logger.warning("Ignorning non-bool allow_ip_literals flag")
|
||||
allow_ip_literals = True
|
||||
if not allow_ip_literals:
|
||||
# check for ipv6 literals. These start with '['.
|
||||
|
@ -723,7 +695,7 @@ def server_matches_acl_event(server_name, acl_event):
|
|||
# next, check the deny list
|
||||
deny = acl_event.content.get("deny", [])
|
||||
if not isinstance(deny, (list, tuple)):
|
||||
logger.warn("Ignorning non-list deny ACL %s", deny)
|
||||
logger.warning("Ignorning non-list deny ACL %s", deny)
|
||||
deny = []
|
||||
for e in deny:
|
||||
if _acl_entry_matches(server_name, e):
|
||||
|
@ -733,7 +705,7 @@ def server_matches_acl_event(server_name, acl_event):
|
|||
# then the allow list.
|
||||
allow = acl_event.content.get("allow", [])
|
||||
if not isinstance(allow, (list, tuple)):
|
||||
logger.warn("Ignorning non-list allow ACL %s", allow)
|
||||
logger.warning("Ignorning non-list allow ACL %s", allow)
|
||||
allow = []
|
||||
for e in allow:
|
||||
if _acl_entry_matches(server_name, e):
|
||||
|
@ -747,7 +719,7 @@ def server_matches_acl_event(server_name, acl_event):
|
|||
|
||||
def _acl_entry_matches(server_name, acl_entry):
|
||||
if not isinstance(acl_entry, six.string_types):
|
||||
logger.warn(
|
||||
logger.warning(
|
||||
"Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
|
||||
)
|
||||
return False
|
||||
|
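Editor's note: the ACL checks above match each entry as a glob against the server name. A minimal sketch of that matching, assuming the usual "*"/"?" glob semantics — the helper name here is illustrative; Synapse has its own glob-to-regex utility:

    import re

    def glob_to_regex_sketch(glob):
        # "*" matches any run of characters, "?" matches exactly one;
        # everything else is matched literally, case-insensitively.
        pattern = "".join(
            ".*" if c == "*" else "." if c == "?" else re.escape(c) for c in glob
        )
        return re.compile(r"\A" + pattern + r"\Z", re.IGNORECASE)

    assert glob_to_regex_sketch("*.example.com").match("host.example.com")
    assert not glob_to_regex_sketch("*.example.com").match("example.org")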
@@ -799,15 +771,14 @@ class FederationHandlerRegistry(object):

         self.query_handlers[query_type] = handler

-    @defer.inlineCallbacks
-    def on_edu(self, edu_type, origin, content):
+    async def on_edu(self, edu_type, origin, content):
         handler = self.edu_handlers.get(edu_type)
         if not handler:
-            logger.warn("No handler registered for EDU type %s", edu_type)
+            logger.warning("No handler registered for EDU type %s", edu_type)

         with start_active_span_from_edu(content, "handle_edu"):
             try:
-                yield handler(origin, content)
+                await handler(origin, content)
             except SynapseError as e:
                 logger.info("Failed to handle edu %r: %r", edu_type, e)
             except Exception:
@@ -816,7 +787,7 @@ class FederationHandlerRegistry(object):
     def on_query(self, query_type, args):
         handler = self.query_handlers.get(query_type)
         if not handler:
-            logger.warn("No handler registered for query type %s", query_type)
+            logger.warning("No handler registered for query type %s", query_type)
             raise NotFoundError("No handler for Query type '%s'" % (query_type,))

         return handler(args)
@@ -840,7 +811,7 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):

         super(ReplicationFederationHandlerRegistry, self).__init__()

-    def on_edu(self, edu_type, origin, content):
+    async def on_edu(self, edu_type, origin, content):
         """Overrides FederationHandlerRegistry
         """
         if not self.config.use_presence and edu_type == "m.presence":
@@ -848,17 +819,17 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):

         handler = self.edu_handlers.get(edu_type)
         if handler:
-            return super(ReplicationFederationHandlerRegistry, self).on_edu(
+            return await super(ReplicationFederationHandlerRegistry, self).on_edu(
                 edu_type, origin, content
             )

-        return self._send_edu(edu_type=edu_type, origin=origin, content=content)
+        return await self._send_edu(edu_type=edu_type, origin=origin, content=content)

-    def on_query(self, query_type, args):
+    async def on_query(self, query_type, args):
         """Overrides FederationHandlerRegistry
         """
         handler = self.query_handlers.get(query_type)
         if handler:
-            return handler(args)
+            return await handler(args)

-        return self._get_query_client(query_type=query_type, args=args)
+        return await self._get_query_client(query_type=query_type, args=args)
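Editor's note: the conversions above all follow one mechanical pattern: drop `@defer.inlineCallbacks`, declare the method `async`, and replace each `yield` of a Deferred with `await` (Deferreds are directly awaitable in modern Twisted). A minimal sketch of the before/after shape, with illustrative names:

    from twisted.internet import defer

    class BeforeHandler(object):
        @defer.inlineCallbacks
        def on_request(self, origin):
            value = yield defer.succeed(origin)  # yield a Deferred
            return value

    class AfterHandler(object):
        async def on_request(self, origin):
            value = await defer.succeed(origin)  # await the same Deferred
            return value

    d = BeforeHandler().on_request("hs1")                       # a fired Deferred
    d2 = defer.ensureDeferred(AfterHandler().on_request("hs2"))  # wrap the coroutine

Legacy `inlineCallbacks` call sites can keep consuming the new coroutines by wrapping them with `defer.ensureDeferred()`, as in the last line.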
@@ -36,6 +36,8 @@ from six import iteritems

 from sortedcontainers import SortedDict

+from twisted.internet import defer
+
 from synapse.metrics import LaterGauge
 from synapse.storage.presence import UserPresenceState
 from synapse.util.metrics import Measure
@@ -212,7 +214,7 @@ class FederationRemoteSendQueue(object):
             receipt (synapse.types.ReadReceipt):
         """
         # nothing to do here: the replication listener will handle it.
-        pass
+        return defer.succeed(None)

     def send_presence(self, states):
         """As per FederationSender
@@ -192,15 +192,16 @@ class PerDestinationQueue(object):
             # We have to keep 2 free slots for presence and rr_edus
             limit = MAX_EDUS_PER_TRANSACTION - 2

-            device_update_edus, dev_list_id = (
-                yield self._get_device_update_edus(limit)
+            device_update_edus, dev_list_id = yield self._get_device_update_edus(
+                limit
             )

             limit -= len(device_update_edus)

-            to_device_edus, device_stream_id = (
-                yield self._get_to_device_message_edus(limit)
-            )
+            (
+                to_device_edus,
+                device_stream_id,
+            ) = yield self._get_to_device_message_edus(limit)

             pending_edus = device_update_edus + to_device_edus

@@ -359,20 +360,20 @@ class PerDestinationQueue(object):
         last_device_list = self._last_device_list_stream_id

         # Retrieve list of new device updates to send to the destination
-        now_stream_id, results = yield self._store.get_devices_by_remote(
+        now_stream_id, results = yield self._store.get_device_updates_by_remote(
             self._destination, last_device_list, limit=limit
         )
         edus = [
             Edu(
                 origin=self._server_name,
                 destination=self._destination,
-                edu_type="m.device_list_update",
+                edu_type=edu_type,
                 content=content,
             )
-            for content in results
+            for (edu_type, content) in results
         ]

-        assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"
+        assert len(edus) <= limit, "get_device_updates_by_remote returned too many EDUs"

         return (edus, now_stream_id)
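Editor's note: the renamed `get_device_updates_by_remote` now yields `(edu_type, content)` pairs rather than bare contents, so the same stream can carry both `m.device_list_update` and signing-key EDUs. A sketch of the consuming comprehension, with a namedtuple standing in for Synapse's `Edu` class:

    from collections import namedtuple

    Edu = namedtuple("Edu", ["origin", "destination", "edu_type", "content"])

    results = [
        ("m.device_list_update", {"user_id": "@alice:hs", "device_id": "DEV"}),
        ("org.matrix.signing_key_update", {"user_id": "@alice:hs"}),
    ]
    edus = [
        Edu(origin="hs", destination="remote", edu_type=edu_type, content=content)
        for (edu_type, content) in results
    ]
    assert edus[1].edu_type == "org.matrix.signing_key_update"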
@@ -146,7 +146,7 @@ class TransactionManager(object):
         if code == 200:
             for e_id, r in response.get("pdus", {}).items():
                 if "error" in r:
-                    logger.warn(
+                    logger.warning(
                         "TX [%s] {%s} Remote returned error for %s: %s",
                         destination,
                         txn_id,
@@ -155,7 +155,7 @@ class TransactionManager(object):
                     )
         else:
             for p in pdus:
-                logger.warn(
+                logger.warning(
                     "TX [%s] {%s} Failed to send event %s",
                     destination,
                     txn_id,
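Editor's note: the `logger.warn` → `logger.warning` replacements here, and throughout the rest of this commit, are safe one-for-one swaps: on Python 3, `Logger.warn` is a thin deprecated alias that emits a `DeprecationWarning` and then calls `warning`. For example:

    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger(__name__)

    logger.warning("preferred spelling")
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        logger.warn("deprecated spelling")  # still works, but warns
    assert any(w.category is DeprecationWarning for w in caught)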
@@ -122,10 +122,10 @@ class TransportLayerClient(object):
             Deferred: Results in a dict received from the remote homeserver.
         """
         logger.debug(
-            "backfill dest=%s, room_id=%s, event_tuples=%s, limit=%s",
+            "backfill dest=%s, room_id=%s, event_tuples=%r, limit=%s",
             destination,
             room_id,
-            repr(event_tuples),
+            event_tuples,
             str(limit),
         )
@@ -202,7 +202,7 @@ def _parse_auth_header(header_bytes):
         sig = strip_quotes(param_dict["sig"])
         return origin, key, sig
     except Exception as e:
-        logger.warn(
+        logger.warning(
             "Error parsing auth header '%s': %s",
             header_bytes.decode("ascii", "replace"),
             e,
@@ -287,10 +287,12 @@ class BaseFederationServlet(object):
             except NoAuthenticationError:
                 origin = None
                 if self.REQUIRE_AUTH:
-                    logger.warn("authenticate_request failed: missing authentication")
+                    logger.warning(
+                        "authenticate_request failed: missing authentication"
+                    )
                     raise
             except Exception as e:
-                logger.warn("authenticate_request failed: %s", e)
+                logger.warning("authenticate_request failed: %s", e)
                 raise

             request_tags = {
@@ -181,7 +181,7 @@ class GroupAttestionRenewer(object):
             elif not self.is_mine_id(user_id):
                 destination = get_domain_from_id(user_id)
             else:
-                logger.warn(
+                logger.warning(
                     "Incorrectly trying to do attestations for user: %r in %r",
                     user_id,
                     group_id,
@@ -488,7 +488,7 @@ class GroupsServerHandler(object):
                 profile = yield self.profile_handler.get_profile_from_cache(user_id)
                 user_profile.update(profile)
             except Exception as e:
-                logger.warn("Error getting profile for %s: %s", user_id, e)
+                logger.warning("Error getting profile for %s: %s", user_id, e)
             user_profiles.append(user_profile)

         return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
@@ -38,9 +38,10 @@ class AccountDataEventSource(object):
                 {"type": "m.tag", "content": {"tags": room_tags}, "room_id": room_id}
             )

-        account_data, room_account_data = (
-            yield self.store.get_updated_account_data_for_user(user_id, last_stream_id)
-        )
+        (
+            account_data,
+            room_account_data,
+        ) = yield self.store.get_updated_account_data_for_user(user_id, last_stream_id)

         for account_data_type, content in account_data.items():
             results.append({"type": account_data_type, "content": content})
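Editor's note: several hunks in this commit, including the one above, only reflow multi-target unpacking of a long call. Parenthesising the assignment targets (rather than the right-hand side) is the form Black produces when the line overflows, and the two spellings are equivalent:

    def fetch():
        return {"m.tag": {}}, {"!room:hs": {}}

    # one line, when it fits:
    account_data, room_account_data = fetch()

    # Black's multi-line form, when it does not:
    (
        account_data,
        room_account_data,
    ) = fetch()
    assert "m.tag" in account_data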
@@ -30,6 +30,9 @@ class AdminHandler(BaseHandler):
     def __init__(self, hs):
         super(AdminHandler, self).__init__(hs)

+        self.storage = hs.get_storage()
+        self.state_store = self.storage.state
+
     @defer.inlineCallbacks
     def get_whois(self, user):
         connections = []
@@ -205,7 +208,7 @@ class AdminHandler(BaseHandler):

             from_key = events[-1].internal_metadata.after

-            events = yield filter_events_for_client(self.store, user_id, events)
+            events = yield filter_events_for_client(self.storage, user_id, events)

             writer.write_events(room_id, events)

@@ -241,7 +244,7 @@ class AdminHandler(BaseHandler):
         for event_id in extremities:
             if not event_to_unseen_prevs[event_id]:
                 continue
-            state = yield self.store.get_state_for_event(event_id)
+            state = yield self.state_store.get_state_for_event(event_id)
             writer.write_state(room_id, event_id, state)

         return writer.finished()
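Editor's note: `AdminHandler`, like the other handlers converted below, now reaches state queries through `hs.get_storage().state` instead of the monolithic datastore. A rough sketch of the shape of that split, with illustrative stand-in classes:

    class StateStoreSketch(object):
        """Stand-in for the state-storage interface."""

        def get_state_for_event(self, event_id):
            return {("m.room.member", "@user:hs"): "$member_event"}

    class StorageSketch(object):
        """Stand-in for the object returned by hs.get_storage()."""

        def __init__(self):
            self.state = StateStoreSketch()

    storage = StorageSketch()
    state = storage.state.get_state_for_event("$some_event")
    assert ("m.room.member", "@user:hs") in state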
@@ -73,7 +73,10 @@ class ApplicationServicesHandler(object):
         try:
             limit = 100
             while True:
-                upper_bound, events = yield self.store.get_new_events_for_appservice(
+                (
+                    upper_bound,
+                    events,
+                ) = yield self.store.get_new_events_for_appservice(
                     self.current_max, limit
                 )
@@ -525,7 +525,7 @@ class AuthHandler(BaseHandler):

         result = None
         if not user_infos:
-            logger.warn("Attempted to login as %s but they do not exist", user_id)
+            logger.warning("Attempted to login as %s but they do not exist", user_id)
         elif len(user_infos) == 1:
             # a single match (possibly not exact)
             result = user_infos.popitem()
@@ -534,7 +534,7 @@ class AuthHandler(BaseHandler):
             result = (user_id, user_infos[user_id])
         else:
             # multiple matches, none of them exact
-            logger.warn(
+            logger.warning(
                 "Attempted to login as %s but it matches more than one user "
                 "inexactly: %r",
                 user_id,
@@ -728,7 +728,7 @@ class AuthHandler(BaseHandler):

         result = yield self.validate_hash(password, password_hash)
         if not result:
-            logger.warn("Failed password login for user %s", user_id)
+            logger.warning("Failed password login for user %s", user_id)
             return None
         return user_id
@@ -46,6 +46,7 @@ class DeviceWorkerHandler(BaseHandler):

         self.hs = hs
         self.state = hs.get_state_handler()
+        self.state_store = hs.get_storage().state
         self._auth_handler = hs.get_auth_handler()

     @trace
@@ -178,7 +179,7 @@ class DeviceWorkerHandler(BaseHandler):
                 continue

             # mapping from event_id -> state_dict
-            prev_state_ids = yield self.store.get_state_ids_for_events(event_ids)
+            prev_state_ids = yield self.state_store.get_state_ids_for_events(event_ids)

             # Check if we've joined the room? If so we just blindly add all the users to
             # the "possibly changed" users.
@@ -458,7 +459,18 @@ class DeviceHandler(DeviceWorkerHandler):
     @defer.inlineCallbacks
     def on_federation_query_user_devices(self, user_id):
         stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
-        return {"user_id": user_id, "stream_id": stream_id, "devices": devices}
+        master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master")
+        self_signing_key = yield self.store.get_e2e_cross_signing_key(
+            user_id, "self_signing"
+        )
+
+        return {
+            "user_id": user_id,
+            "stream_id": stream_id,
+            "devices": devices,
+            "master_key": master_key,
+            "self_signing_key": self_signing_key,
+        }

     @defer.inlineCallbacks
     def user_left_room(self, user, room_id):
@@ -656,7 +668,7 @@ class DeviceListUpdater(object):
             except (NotRetryingDestination, RequestSendFailed, HttpResponseException):
                 # TODO: Remember that we are now out of sync and try again
                 # later
-                logger.warn("Failed to handle device list update for %s", user_id)
+                logger.warning("Failed to handle device list update for %s", user_id)
                 # We abort on exceptions rather than accepting the update
                 # as otherwise synapse will 'forget' that its device list
                 # is out of date. If we bail then we will retry the resync
@@ -694,7 +706,7 @@ class DeviceListUpdater(object):
             # up on storing the total list of devices and only handle the
             # delta instead.
             if len(devices) > 1000:
-                logger.warn(
+                logger.warning(
                     "Ignoring device list snapshot for %s as it has >1K devs (%d)",
                     user_id,
                     len(devices),
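Editor's note: `on_federation_query_user_devices` now attaches the user's cross-signing keys to the reply. The response shape looks roughly like this — a sketch with illustrative values; the key fields may be `None` when the user has not set up cross-signing:

    def build_user_devices_response(user_id, stream_id, devices,
                                    master_key, self_signing_key):
        # Mirrors the dict constructed in the hunk above.
        return {
            "user_id": user_id,
            "stream_id": stream_id,
            "devices": devices,
            "master_key": master_key,
            "self_signing_key": self_signing_key,
        }

    response = build_user_devices_response(
        "@alice:hs", 42, [{"device_id": "DEV"}], {"keys": {}}, None
    )
    assert response["master_key"] == {"keys": {}}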
@@ -52,7 +52,7 @@ class DeviceMessageHandler(object):
         local_messages = {}
         sender_user_id = content["sender"]
         if origin != get_domain_from_id(sender_user_id):
-            logger.warn(
+            logger.warning(
                 "Dropping device message from %r with spoofed sender %r",
                 origin,
                 sender_user_id,
@@ -250,7 +250,7 @@ class DirectoryHandler(BaseHandler):
                 ignore_backoff=True,
             )
         except CodeMessageException as e:
-            logging.warn("Error retrieving alias")
+            logging.warning("Error retrieving alias")
             if e.code == 404:
                 result = None
             else:
@@ -36,6 +36,8 @@ from synapse.types import (
     get_verify_key_from_cross_signing_key,
 )
 from synapse.util import unwrapFirstError
+from synapse.util.async_helpers import Linearizer
+from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.retryutils import NotRetryingDestination

 logger = logging.getLogger(__name__)
@@ -49,10 +51,19 @@ class E2eKeysHandler(object):
         self.is_mine = hs.is_mine
         self.clock = hs.get_clock()

+        self._edu_updater = SigningKeyEduUpdater(hs, self)
+
+        federation_registry = hs.get_federation_registry()
+
+        # FIXME: switch to m.signing_key_update when MSC1756 is merged into the spec
+        federation_registry.register_edu_handler(
+            "org.matrix.signing_key_update",
+            self._edu_updater.incoming_signing_key_update,
+        )
         # doesn't really work as part of the generic query API, because the
         # query request requires an object POST, but we abuse the
         # "query handler" interface.
-        hs.get_federation_registry().register_query_handler(
+        federation_registry.register_query_handler(
             "client_keys", self.on_federation_query_client_keys
         )

@@ -119,9 +130,10 @@ class E2eKeysHandler(object):
             else:
                 query_list.append((user_id, None))

-            user_ids_not_in_cache, remote_results = (
-                yield self.store.get_user_devices_from_cache(query_list)
-            )
+            (
+                user_ids_not_in_cache,
+                remote_results,
+            ) = yield self.store.get_user_devices_from_cache(query_list)
             for user_id, devices in iteritems(remote_results):
                 user_devices = results.setdefault(user_id, {})
                 for device_id, device in iteritems(devices):
@@ -207,13 +219,15 @@ class E2eKeysHandler(object):
                     if user_id in destination_query:
                         results[user_id] = keys

-                for user_id, key in remote_result["master_keys"].items():
-                    if user_id in destination_query:
-                        cross_signing_keys["master_keys"][user_id] = key
+                if "master_keys" in remote_result:
+                    for user_id, key in remote_result["master_keys"].items():
+                        if user_id in destination_query:
+                            cross_signing_keys["master_keys"][user_id] = key

-                for user_id, key in remote_result["self_signing_keys"].items():
-                    if user_id in destination_query:
-                        cross_signing_keys["self_signing_keys"][user_id] = key
+                if "self_signing_keys" in remote_result:
+                    for user_id, key in remote_result["self_signing_keys"].items():
+                        if user_id in destination_query:
+                            cross_signing_keys["self_signing_keys"][user_id] = key

             except Exception as e:
                 failure = _exception_to_failure(e)
@@ -251,7 +265,7 @@ class E2eKeysHandler(object):

         Returns:
             defer.Deferred[dict[str, dict[str, dict]]]: map from
-                (master|self_signing|user_signing) -> user_id -> key
+                (master_keys|self_signing_keys|user_signing_keys) -> user_id -> key
         """
         master_keys = {}
         self_signing_keys = {}
@@ -343,7 +357,16 @@ class E2eKeysHandler(object):
         """
         device_keys_query = query_body.get("device_keys", {})
         res = yield self.query_local_devices(device_keys_query)
-        return {"device_keys": res}
+        ret = {"device_keys": res}
+
+        # add in the cross-signing keys
+        cross_signing_keys = yield self.get_cross_signing_keys_from_cache(
+            device_keys_query, None
+        )
+
+        ret.update(cross_signing_keys)
+
+        return ret

     @trace
     @defer.inlineCallbacks
@@ -688,17 +711,21 @@ class E2eKeysHandler(object):

         try:
             # get our self-signing key to verify the signatures
-            _, self_signing_key_id, self_signing_verify_key = yield self._get_e2e_cross_signing_verify_key(
-                user_id, "self_signing"
-            )
+            (
+                _,
+                self_signing_key_id,
+                self_signing_verify_key,
+            ) = yield self._get_e2e_cross_signing_verify_key(user_id, "self_signing")

             # get our master key, since we may have received a signature of it.
             # We need to fetch it here so that we know what its key ID is, so
             # that we can check if a signature that was sent is a signature of
             # the master key or of a device
-            master_key, _, master_verify_key = yield self._get_e2e_cross_signing_verify_key(
-                user_id, "master"
-            )
+            (
+                master_key,
+                _,
+                master_verify_key,
+            ) = yield self._get_e2e_cross_signing_verify_key(user_id, "master")

             # fetch our stored devices. This is used to 1. verify
             # signatures on the master key, and 2. to compare with what
@@ -838,9 +865,11 @@ class E2eKeysHandler(object):

         try:
             # get our user-signing key to verify the signatures
-            user_signing_key, user_signing_key_id, user_signing_verify_key = yield self._get_e2e_cross_signing_verify_key(
-                user_id, "user_signing"
-            )
+            (
+                user_signing_key,
+                user_signing_key_id,
+                user_signing_verify_key,
+            ) = yield self._get_e2e_cross_signing_verify_key(user_id, "user_signing")
         except SynapseError as e:
             failure = _exception_to_failure(e)
             for user, devicemap in signatures.items():
@@ -859,7 +888,11 @@ class E2eKeysHandler(object):
                 try:
                     # get the target user's master key, to make sure it matches
                     # what was sent
-                    master_key, master_key_id, _ = yield self._get_e2e_cross_signing_verify_key(
+                    (
+                        master_key,
+                        master_key_id,
+                        _,
+                    ) = yield self._get_e2e_cross_signing_verify_key(
                         target_user, "master", user_id
                     )

@@ -1047,3 +1080,100 @@ class SignatureListItem:
     target_user_id = attr.ib()
     target_device_id = attr.ib()
     signature = attr.ib()
+
+
+class SigningKeyEduUpdater(object):
+    """Handles incoming signing key updates from federation and updates the DB"""
+
+    def __init__(self, hs, e2e_keys_handler):
+        self.store = hs.get_datastore()
+        self.federation = hs.get_federation_client()
+        self.clock = hs.get_clock()
+        self.e2e_keys_handler = e2e_keys_handler
+
+        self._remote_edu_linearizer = Linearizer(name="remote_signing_key")
+
+        # user_id -> list of updates waiting to be handled.
+        self._pending_updates = {}
+
+        # Recently seen stream ids. We don't bother keeping these in the DB,
+        # but they're useful to have them about to reduce the number of spurious
+        # resyncs.
+        self._seen_updates = ExpiringCache(
+            cache_name="signing_key_update_edu",
+            clock=self.clock,
+            max_len=10000,
+            expiry_ms=30 * 60 * 1000,
+            iterable=True,
+        )
+
+    @defer.inlineCallbacks
+    def incoming_signing_key_update(self, origin, edu_content):
+        """Called on incoming signing key update from federation. Responsible for
+        parsing the EDU and adding to pending updates list.
+
+        Args:
+            origin (string): the server that sent the EDU
+            edu_content (dict): the contents of the EDU
+        """
+
+        user_id = edu_content.pop("user_id")
+        master_key = edu_content.pop("master_key", None)
+        self_signing_key = edu_content.pop("self_signing_key", None)
+
+        if get_domain_from_id(user_id) != origin:
+            logger.warning("Got signing key update edu for %r from %r", user_id, origin)
+            return
+
+        room_ids = yield self.store.get_rooms_for_user(user_id)
+        if not room_ids:
+            # We don't share any rooms with this user. Ignore update, as we
+            # probably won't get any further updates.
+            return
+
+        self._pending_updates.setdefault(user_id, []).append(
+            (master_key, self_signing_key)
+        )
+
+        yield self._handle_signing_key_updates(user_id)
+
+    @defer.inlineCallbacks
+    def _handle_signing_key_updates(self, user_id):
+        """Actually handle pending updates.
+
+        Args:
+            user_id (string): the user whose updates we are processing
+        """
+
+        device_handler = self.e2e_keys_handler.device_handler
+
+        with (yield self._remote_edu_linearizer.queue(user_id)):
+            pending_updates = self._pending_updates.pop(user_id, [])
+            if not pending_updates:
+                # This can happen since we batch updates
+                return
+
+            device_ids = []
+
+            logger.info("pending updates: %r", pending_updates)
+
+            for master_key, self_signing_key in pending_updates:
+                if master_key:
+                    yield self.store.set_e2e_cross_signing_key(
+                        user_id, "master", master_key
+                    )
+                    _, verify_key = get_verify_key_from_cross_signing_key(master_key)
+                    # verify_key is a VerifyKey from signedjson, which uses
+                    # .version to denote the portion of the key ID after the
+                    # algorithm and colon, which is the device ID
+                    device_ids.append(verify_key.version)
+                if self_signing_key:
+                    yield self.store.set_e2e_cross_signing_key(
+                        user_id, "self_signing", self_signing_key
+                    )
+                    _, verify_key = get_verify_key_from_cross_signing_key(
+                        self_signing_key
+                    )
+                    device_ids.append(verify_key.version)
+
+            yield device_handler.notify_device_update(user_id, device_ids)
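Editor's note: the new `SigningKeyEduUpdater` queues updates per user and drains them under a linearizer, so concurrent EDUs for the same user are batched and handled once. A minimal sketch of that pattern using `asyncio` — an `asyncio.Lock` stands in for Synapse's per-key `Linearizer`:

    import asyncio

    class PendingUpdates(object):
        def __init__(self):
            self._pending = {}
            self._lock = asyncio.Lock()  # Synapse uses a per-key Linearizer

        async def add(self, user_id, update):
            self._pending.setdefault(user_id, []).append(update)
            await self._drain(user_id)

        async def _drain(self, user_id):
            async with self._lock:
                updates = self._pending.pop(user_id, [])
                if not updates:
                    return  # a concurrent drain already handled this batch
                for update in updates:
                    print("handling", user_id, update)

    asyncio.run(PendingUpdates().add("@alice:hs", {"master_key": "..."}))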
@@ -147,6 +147,10 @@ class EventStreamHandler(BaseHandler):


 class EventHandler(BaseHandler):
+    def __init__(self, hs):
+        super(EventHandler, self).__init__(hs)
+        self.storage = hs.get_storage()
+
     @defer.inlineCallbacks
     def get_event(self, user, room_id, event_id):
         """Retrieve a single specified event.
@@ -172,7 +176,7 @@ class EventHandler(BaseHandler):
         is_peeking = user.to_string() not in users

         filtered = yield filter_events_for_client(
-            self.store, user.to_string(), [event], is_peeking=is_peeking
+            self.storage, user.to_string(), [event], is_peeking=is_peeking
         )

         if not filtered:
@@ -45,6 +45,7 @@ from synapse.api.errors import (
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
 from synapse.crypto.event_signing import compute_event_signature
 from synapse.event_auth import auth_types_for_event
+from synapse.events.snapshot import EventContext
 from synapse.events.validator import EventValidator
 from synapse.logging.context import (
     make_deferred_yieldable,
@@ -109,6 +110,8 @@ class FederationHandler(BaseHandler):
         self.hs = hs

         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
+        self.state_store = self.storage.state
         self.federation_client = hs.get_federation_client()
         self.state_handler = hs.get_state_handler()
         self.server_name = hs.hostname
@@ -180,7 +183,7 @@ class FederationHandler(BaseHandler):
         try:
             self._sanity_check_event(pdu)
         except SynapseError as err:
-            logger.warn(
+            logger.warning(
                 "[%s %s] Received event failed sanity checks", room_id, event_id
             )
             raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id)
@@ -301,7 +304,7 @@ class FederationHandler(BaseHandler):
                 # following.

                 if sent_to_us_directly:
-                    logger.warn(
+                    logger.warning(
                         "[%s %s] Rejecting: failed to fetch %d prev events: %s",
                         room_id,
                         event_id,
@@ -324,7 +327,7 @@ class FederationHandler(BaseHandler):
                 event_map = {event_id: pdu}
                 try:
                     # Get the state of the events we know about
-                    ours = yield self.store.get_state_groups_ids(room_id, seen)
+                    ours = yield self.state_store.get_state_groups_ids(room_id, seen)

                     # state_maps is a list of mappings from (type, state_key) to event_id
                     state_maps = list(
@@ -350,10 +353,11 @@ class FederationHandler(BaseHandler):
                         # note that if any of the missing prevs share missing state or
                         # auth events, the requests to fetch those events are deduped
                         # by the get_pdu_cache in federation_client.
-                        remote_state, got_auth_chain = (
-                            yield self.federation_client.get_state_for_room(
-                                origin, room_id, p
-                            )
+                        (
+                            remote_state,
+                            got_auth_chain,
+                        ) = yield self.federation_client.get_state_for_room(
+                            origin, room_id, p
                         )

                         # we want the state *after* p; get_state_for_room returns the
@@ -405,7 +409,7 @@ class FederationHandler(BaseHandler):
                     state = [event_map[e] for e in six.itervalues(state_map)]
                     auth_chain = list(auth_chains)
                 except Exception:
-                    logger.warn(
+                    logger.warning(
                         "[%s %s] Error attempting to resolve state at missing "
                         "prev_events",
                         room_id,
@@ -518,7 +522,9 @@ class FederationHandler(BaseHandler):
             # We failed to get the missing events, but since we need to handle
             # the case of `get_missing_events` not returning the necessary
             # events anyway, it is safe to simply log the error and continue.
-            logger.warn("[%s %s]: Failed to get prev_events: %s", room_id, event_id, e)
+            logger.warning(
+                "[%s %s]: Failed to get prev_events: %s", room_id, event_id, e
+            )
             return

         logger.info(
@@ -545,7 +551,7 @@ class FederationHandler(BaseHandler):
                     yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
                 except FederationError as e:
                     if e.code == 403:
-                        logger.warn(
+                        logger.warning(
                             "[%s %s] Received prev_event %s failed history check.",
                             room_id,
                             event_id,
@@ -888,7 +894,7 @@ class FederationHandler(BaseHandler):
         # We set `check_history_visibility_only` as we might otherwise get false
         # positives from users having been erased.
         filtered_extremities = yield filter_events_for_server(
-            self.store,
+            self.storage,
             self.server_name,
             list(extremities_events.values()),
             redact=False,
@@ -1059,7 +1065,7 @@ class FederationHandler(BaseHandler):
             SynapseError if the event does not pass muster
         """
         if len(ev.prev_event_ids()) > 20:
-            logger.warn(
+            logger.warning(
                 "Rejecting event %s which has %i prev_events",
                 ev.event_id,
                 len(ev.prev_event_ids()),
@@ -1067,7 +1073,7 @@ class FederationHandler(BaseHandler):
             raise SynapseError(http_client.BAD_REQUEST, "Too many prev_events")

         if len(ev.auth_event_ids()) > 10:
-            logger.warn(
+            logger.warning(
                 "Rejecting event %s which has %i auth_events",
                 ev.event_id,
                 len(ev.auth_event_ids()),
@@ -1101,7 +1107,7 @@ class FederationHandler(BaseHandler):
     @defer.inlineCallbacks
     def do_invite_join(self, target_hosts, room_id, joinee, content):
         """ Attempts to join the `joinee` to the room `room_id` via the
-        server `target_host`.
+        servers contained in `target_hosts`.

         This first triggers a /make_join/ request that returns a partial
         event that we can fill out and sign. This is then sent to the
@@ -1110,6 +1116,15 @@ class FederationHandler(BaseHandler):

         We suspend processing of any received events from this room until we
         have finished processing the join.
+
+        Args:
+            target_hosts (Iterable[str]): List of servers to attempt to join the room with.
+
+            room_id (str): The ID of the room to join.
+
+            joinee (str): The User ID of the joining user.
+
+            content (dict): The event content to use for the join event.
         """
         logger.debug("Joining %s to %s", joinee, room_id)

@@ -1169,6 +1184,22 @@ class FederationHandler(BaseHandler):

             yield self._persist_auth_tree(origin, auth_chain, state, event)

+            # Check whether this room is the result of an upgrade of a room we already know
+            # about. If so, migrate over user information
+            predecessor = yield self.store.get_room_predecessor(room_id)
+            if not predecessor:
+                return
+            old_room_id = predecessor["room_id"]
+            logger.debug(
+                "Found predecessor for %s during remote join: %s", room_id, old_room_id
+            )
+
+            # We retrieve the room member handler here as to not cause a cyclic dependency
+            member_handler = self.hs.get_room_member_handler()
+            yield member_handler.transfer_room_state_on_room_upgrade(
+                old_room_id, room_id
+            )
+
             logger.debug("Finished joining %s to %s", joinee, room_id)
         finally:
             room_queue = self.room_queues[room_id]
@@ -1203,7 +1234,7 @@ class FederationHandler(BaseHandler):
                 with nested_logging_context(p.event_id):
                     yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
             except Exception as e:
-                logger.warn(
+                logger.warning(
                     "Error handling queued PDU %s from %s: %s", p.event_id, origin, e
                 )

@@ -1250,7 +1281,7 @@ class FederationHandler(BaseHandler):
                 builder=builder
             )
         except AuthError as e:
-            logger.warn("Failed to create join %r because %s", event, e)
+            logger.warning("Failed to create join to %s because %s", room_id, e)
             raise e

         event_allowed = yield self.third_party_event_rules.check_event_allowed(
@@ -1494,7 +1525,7 @@ class FederationHandler(BaseHandler):
                 room_version, event, context, do_sig_check=False
             )
         except AuthError as e:
-            logger.warn("Failed to create new leave %r because %s", event, e)
+            logger.warning("Failed to create new leave %r because %s", event, e)
             raise e

         return event
@@ -1549,7 +1580,7 @@ class FederationHandler(BaseHandler):
             event_id, allow_none=False, check_room_id=room_id
         )

-        state_groups = yield self.store.get_state_groups(room_id, [event_id])
+        state_groups = yield self.state_store.get_state_groups(room_id, [event_id])

         if state_groups:
             _, state = list(iteritems(state_groups)).pop()
@@ -1578,7 +1609,7 @@ class FederationHandler(BaseHandler):
             event_id, allow_none=False, check_room_id=room_id
         )

-        state_groups = yield self.store.get_state_groups_ids(room_id, [event_id])
+        state_groups = yield self.state_store.get_state_groups_ids(room_id, [event_id])

         if state_groups:
             _, state = list(state_groups.items()).pop()
@@ -1606,7 +1637,7 @@ class FederationHandler(BaseHandler):

         events = yield self.store.get_backfill_events(room_id, pdu_list, limit)

-        events = yield filter_events_for_server(self.store, origin, events)
+        events = yield filter_events_for_server(self.storage, origin, events)

         return events

@@ -1636,7 +1667,7 @@ class FederationHandler(BaseHandler):
             if not in_room:
                 raise AuthError(403, "Host not in room.")

-            events = yield filter_events_for_server(self.store, origin, [event])
+            events = yield filter_events_for_server(self.storage, origin, [event])
             event = events[0]
             return event
         else:
@@ -1788,7 +1819,7 @@ class FederationHandler(BaseHandler):
                 # cause SynapseErrors in auth.check. We don't want to give up
                 # the attempt to federate altogether in such cases.

-                logger.warn("Rejecting %s because %s", e.event_id, err.msg)
+                logger.warning("Rejecting %s because %s", e.event_id, err.msg)

                 if e == event:
                     raise
@@ -1841,12 +1872,7 @@ class FederationHandler(BaseHandler):
             if c and c.type == EventTypes.Create:
                 auth_events[(c.type, c.state_key)] = c

-        try:
-            yield self.do_auth(origin, event, context, auth_events=auth_events)
-        except AuthError as e:
-            logger.warn("[%s %s] Rejecting: %s", event.room_id, event.event_id, e.msg)
-
-            context.rejected = RejectedReason.AUTH_ERROR
+        context = yield self.do_auth(origin, event, context, auth_events=auth_events)

         if not context.rejected:
             yield self._check_for_soft_fail(event, state, backfilled)
@@ -1902,7 +1928,7 @@ class FederationHandler(BaseHandler):
             # given state at the event. This should correctly handle cases
             # like bans, especially with state res v2.

-            state_sets = yield self.store.get_state_groups(
+            state_sets = yield self.state_store.get_state_groups(
                 event.room_id, extrem_ids
             )
             state_sets = list(state_sets.values())
@@ -1938,7 +1964,7 @@ class FederationHandler(BaseHandler):
         try:
             event_auth.check(room_version, event, auth_events=current_auth_events)
         except AuthError as e:
-            logger.warn("Soft-failing %r because %s", event, e)
+            logger.warning("Soft-failing %r because %s", event, e)
             event.internal_metadata.soft_failed = True

     @defer.inlineCallbacks
@@ -1993,7 +2019,7 @@ class FederationHandler(BaseHandler):
         )

         missing_events = yield filter_events_for_server(
-            self.store, origin, missing_events
+            self.storage, origin, missing_events
         )

         return missing_events
@@ -2015,12 +2041,12 @@ class FederationHandler(BaseHandler):

             Also NB that this function adds entries to it.
         Returns:
-            defer.Deferred[None]
+            defer.Deferred[EventContext]: updated context object
         """
         room_version = yield self.store.get_room_version(event.room_id)

         try:
-            yield self._update_auth_events_and_context_for_auth(
+            context = yield self._update_auth_events_and_context_for_auth(
                 origin, event, context, auth_events
            )
         except Exception:
@@ -2037,8 +2063,10 @@ class FederationHandler(BaseHandler):
         try:
             event_auth.check(room_version, event, auth_events=auth_events)
         except AuthError as e:
-            logger.warn("Failed auth resolution for %r because %s", event, e)
-            raise e
+            logger.warning("Failed auth resolution for %r because %s", event, e)
+            context.rejected = RejectedReason.AUTH_ERROR
+
+        return context

     @defer.inlineCallbacks
     def _update_auth_events_and_context_for_auth(
@@ -2062,7 +2090,7 @@ class FederationHandler(BaseHandler):
             auth_events (dict[(str, str)->synapse.events.EventBase]):

         Returns:
-            defer.Deferred[None]
+            defer.Deferred[EventContext]: updated context
         """
         event_auth_events = set(event.auth_event_ids())

@@ -2101,7 +2129,7 @@ class FederationHandler(BaseHandler):
                 # The other side isn't around or doesn't implement the
                 # endpoint, so lets just bail out.
                 logger.info("Failed to get event auth from remote: %s", e)
-                return
+                return context

             seen_remotes = yield self.store.have_seen_events(
                 [e.event_id for e in remote_auth_chain]
@@ -2142,7 +2170,7 @@ class FederationHandler(BaseHandler):

         if event.internal_metadata.is_outlier():
             logger.info("Skipping auth_event fetch for outlier")
-            return
+            return context

         # FIXME: Assumes we have and stored all the state for all the
         # prev_events
@@ -2151,7 +2179,7 @@ class FederationHandler(BaseHandler):
         )

         if not different_auth:
-            return
+            return context

         logger.info(
             "auth_events refers to events which are not in our calculated auth "
@@ -2198,10 +2226,12 @@ class FederationHandler(BaseHandler):

             auth_events.update(new_state)

-            yield self._update_context_for_auth_events(
+            context = yield self._update_context_for_auth_events(
                 event, context, auth_events, event_key
             )

+        return context
+
     @defer.inlineCallbacks
     def _update_context_for_auth_events(self, event, context, auth_events, event_key):
         """Update the state_ids in an event context after auth event resolution,
@@ -2210,14 +2240,16 @@ class FederationHandler(BaseHandler):
         Args:
             event (Event): The event we're handling the context for

-            context (synapse.events.snapshot.EventContext): event context
-                to be updated
+            context (synapse.events.snapshot.EventContext): initial event context

             auth_events (dict[(str, str)->str]): Events to update in the event
                 context.

             event_key ((str, str)): (type, state_key) for the current event.
                 this will not be included in the current_state in the context.
+
+        Returns:
+            Deferred[EventContext]: new event context
         """
         state_updates = {
             k: a.event_id for k, a in iteritems(auth_events) if k != event_key
@@ -2234,7 +2266,7 @@ class FederationHandler(BaseHandler):

         # create a new state group as a delta from the existing one.
         prev_group = context.state_group
-        state_group = yield self.store.store_state_group(
+        state_group = yield self.state_store.store_state_group(
             event.event_id,
             event.room_id,
             prev_group=prev_group,
@@ -2242,7 +2274,7 @@ class FederationHandler(BaseHandler):
             current_state_ids=current_state_ids,
         )

-        yield context.update_state(
+        return EventContext.with_state(
             state_group=state_group,
             current_state_ids=current_state_ids,
             prev_state_ids=prev_state_ids,
@@ -2431,10 +2463,12 @@ class FederationHandler(BaseHandler):
             try:
                 yield self.auth.check_from_context(room_version, event, context)
             except AuthError as e:
-                logger.warn("Denying new third party invite %r because %s", event, e)
+                logger.warning("Denying new third party invite %r because %s", event, e)
                 raise e

             yield self._check_signature(event, context)
+
+            # We retrieve the room member handler here as to not cause a cyclic dependency
             member_handler = self.hs.get_room_member_handler()
             yield member_handler.send_membership_event(None, event, context)
         else:
@@ -2487,7 +2521,7 @@ class FederationHandler(BaseHandler):
         try:
             yield self.auth.check_from_context(room_version, event, context)
         except AuthError as e:
-            logger.warn("Denying third party invite %r because %s", event, e)
+            logger.warning("Denying third party invite %r because %s", event, e)
             raise e
         yield self._check_signature(event, context)

@@ -2495,6 +2529,7 @@ class FederationHandler(BaseHandler):
         # though the sender isn't a local user.
         event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)

+        # We retrieve the room member handler here as to not cause a cyclic dependency
         member_handler = self.hs.get_room_member_handler()
         yield member_handler.send_membership_event(None, event, context)

@@ -2664,7 +2699,7 @@ class FederationHandler(BaseHandler):
                 backfilled=backfilled,
             )
         else:
-            max_stream_id = yield self.store.persist_events(
+            max_stream_id = yield self.storage.persistence.persist_events(
                 event_and_contexts, backfilled=backfilled
             )
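Editor's note: the most consequential change in this file is to `do_auth` and `_update_auth_events_and_context_for_auth`: instead of raising `AuthError` and mutating the context in place, auth failure now marks the context rejected, and the (possibly new) context is returned to the caller. A toy sketch of the new contract, with illustrative names:

    class ContextSketch(object):
        def __init__(self):
            self.rejected = False

    def do_auth_sketch(check, context):
        # On failure, record the rejection on the context instead of raising.
        try:
            check()
        except ValueError as exc:  # stands in for AuthError
            print("Rejecting because %s" % exc)
            context.rejected = "AUTH_ERROR"
        return context

    def failing_check():
        raise ValueError("bad signature")

    ctx = do_auth_sketch(failing_check, ContextSketch())
    assert ctx.rejected == "AUTH_ERROR"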
@@ -392,7 +392,7 @@ class GroupsLocalHandler(object):
         try:
             user_profile = yield self.profile_handler.get_profile(user_id)
         except Exception as e:
-            logger.warn("No profile for user %s: %s", user_id, e)
+            logger.warning("No profile for user %s: %s", user_id, e)
             user_profile = {}

         return {"state": "invite", "user_profile": user_profile}
@@ -272,7 +272,7 @@ class IdentityHandler(BaseHandler):
             changed = False
             if e.code in (400, 404, 501):
                 # The remote server probably doesn't support unbinding (yet)
-                logger.warn("Received %d response while unbinding threepid", e.code)
+                logger.warning("Received %d response while unbinding threepid", e.code)
             else:
                 logger.error("Failed to unbind threepid on identity server: %s", e)
                 raise SynapseError(500, "Failed to contact identity server")
@@ -403,7 +403,7 @@ class IdentityHandler(BaseHandler):

         if self.hs.config.using_identity_server_from_trusted_list:
             # Warn that a deprecated config option is in use
-            logger.warn(
+            logger.warning(
                 'The config option "trust_identity_server_for_password_resets" '
                 'has been replaced by "account_threepid_delegate". '
                 "Please consult the sample config at docs/sample_config.yaml for "
@@ -457,7 +457,7 @@ class IdentityHandler(BaseHandler):

         if self.hs.config.using_identity_server_from_trusted_list:
             # Warn that a deprecated config option is in use
-            logger.warn(
+            logger.warning(
                 'The config option "trust_identity_server_for_password_resets" '
                 'has been replaced by "account_threepid_delegate". '
                 "Please consult the sample config at docs/sample_config.yaml for "
@@ -43,6 +43,8 @@ class InitialSyncHandler(BaseHandler):
         self.validator = EventValidator()
         self.snapshot_cache = SnapshotCache()
         self._event_serializer = hs.get_event_client_serializer()
+        self.storage = hs.get_storage()
+        self.state_store = self.storage.state

     def snapshot_all_rooms(
         self,
@@ -126,8 +128,8 @@ class InitialSyncHandler(BaseHandler):

         tags_by_room = yield self.store.get_tags_for_user(user_id)

-        account_data, account_data_by_room = (
-            yield self.store.get_account_data_for_user(user_id)
+        account_data, account_data_by_room = yield self.store.get_account_data_for_user(
+            user_id
         )

         public_room_ids = yield self.store.get_public_room_ids()
@@ -169,7 +171,7 @@ class InitialSyncHandler(BaseHandler):
                 elif event.membership == Membership.LEAVE:
                     room_end_token = "s%d" % (event.stream_ordering,)
                     deferred_room_state = run_in_background(
-                        self.store.get_state_for_events, [event.event_id]
+                        self.state_store.get_state_for_events, [event.event_id]
                     )
                     deferred_room_state.addCallback(
                         lambda states: states[event.event_id]
@@ -189,7 +191,9 @@ class InitialSyncHandler(BaseHandler):
                     )
                 ).addErrback(unwrapFirstError)

-                messages = yield filter_events_for_client(self.store, user_id, messages)
+                messages = yield filter_events_for_client(
+                    self.storage, user_id, messages
+                )

                 start_token = now_token.copy_and_replace("room_key", token)
                 end_token = now_token.copy_and_replace("room_key", room_end_token)
@@ -307,7 +311,7 @@ class InitialSyncHandler(BaseHandler):
     def _room_initial_sync_parted(
         self, user_id, room_id, pagin_config, membership, member_event_id, is_peeking
     ):
-        room_state = yield self.store.get_state_for_events([member_event_id])
+        room_state = yield self.state_store.get_state_for_events([member_event_id])

         room_state = room_state[member_event_id]

@@ -322,7 +326,7 @@ class InitialSyncHandler(BaseHandler):
         )

         messages = yield filter_events_for_client(
-            self.store, user_id, messages, is_peeking=is_peeking
+            self.storage, user_id, messages, is_peeking=is_peeking
         )

         start_token = StreamToken.START.copy_and_replace("room_key", token)
@@ -414,7 +418,7 @@ class InitialSyncHandler(BaseHandler):
         )

         messages = yield filter_events_for_client(
-            self.store, user_id, messages, is_peeking=is_peeking
+            self.storage, user_id, messages, is_peeking=is_peeking
         )

         start_token = now_token.copy_and_replace("room_key", token)
@ -59,6 +59,8 @@ class MessageHandler(object):
|
|||
self.clock = hs.get_clock()
|
||||
         self.state = hs.get_state_handler()
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
+        self.state_store = self.storage.state
         self._event_serializer = hs.get_event_client_serializer()

     @defer.inlineCallbacks

@@ -74,15 +76,16 @@ class MessageHandler(object):
         Raises:
             SynapseError if something went wrong.
         """
-        membership, membership_event_id = yield self.auth.check_in_room_or_world_readable(
-            room_id, user_id
-        )
+        (
+            membership,
+            membership_event_id,
+        ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)

         if membership == Membership.JOIN:
             data = yield self.state.get_current_state(room_id, event_type, state_key)
         elif membership == Membership.LEAVE:
             key = (event_type, state_key)
-            room_state = yield self.store.get_state_for_events(
+            room_state = yield self.state_store.get_state_for_events(
                 [membership_event_id], StateFilter.from_types([key])
             )
             data = room_state[membership_event_id].get(key)

@@ -135,12 +138,12 @@ class MessageHandler(object):
                 raise NotFoundError("Can't find event for token %s" % (at_token,))

             visible_events = yield filter_events_for_client(
-                self.store, user_id, last_events
+                self.storage, user_id, last_events
             )

             event = last_events[0]
             if visible_events:
-                room_state = yield self.store.get_state_for_events(
+                room_state = yield self.state_store.get_state_for_events(
                     [event.event_id], state_filter=state_filter
                 )
                 room_state = room_state[event.event_id]

@@ -151,9 +154,10 @@ class MessageHandler(object):
                     % (user_id, room_id, at_token),
                 )
         else:
-            membership, membership_event_id = (
-                yield self.auth.check_in_room_or_world_readable(room_id, user_id)
-            )
+            (
+                membership,
+                membership_event_id,
+            ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)

            if membership == Membership.JOIN:
                state_ids = yield self.store.get_filtered_current_state_ids(

@@ -161,7 +165,7 @@ class MessageHandler(object):
                )
                room_state = yield self.store.get_events(state_ids.values())
            elif membership == Membership.LEAVE:
-                room_state = yield self.store.get_state_for_events(
+                room_state = yield self.state_store.get_state_for_events(
                    [membership_event_id], state_filter=state_filter
                )
                room_state = room_state[membership_event_id]

@@ -234,6 +238,7 @@ class EventCreationHandler(object):
         self.hs = hs
         self.auth = hs.get_auth()
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
         self.state = hs.get_state_handler()
         self.clock = hs.get_clock()
         self.validator = EventValidator()

@@ -687,7 +692,7 @@ class EventCreationHandler(object):
         try:
             yield self.auth.check_from_context(room_version, event, context)
         except AuthError as err:
-            logger.warn("Denying new event %r because %s", event, err)
+            logger.warning("Denying new event %r because %s", event, err)
             raise err

         # Ensure that we can round trip before trying to persist in db

@@ -868,7 +873,7 @@ class EventCreationHandler(object):
            if prev_state_ids:
                raise AuthError(403, "Changing the room create event is forbidden")

-        (event_stream_id, max_stream_id) = yield self.store.persist_event(
+        event_stream_id, max_stream_id = yield self.storage.persistence.persist_event(
            event, context=context
        )
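The recurring change in these hunks moves state queries off the main datastore onto a dedicated state storage layer. A minimal sketch of the new access pattern, using only the accessor names that appear in the diff (get_datastore, get_storage, storage.state, get_state_for_events):

from twisted.internet import defer

class ExampleHandler(object):
    def __init__(self, hs):
        # Main datastore: still used for ordinary lookups (events, rooms, ...)
        self.store = hs.get_datastore()
        # New umbrella "storage" object; its .state attribute serves state queries
        self.storage = hs.get_storage()
        self.state_store = self.storage.state

    @defer.inlineCallbacks
    def state_at(self, event_id, state_filter):
        # State lookups now go via self.state_store rather than self.store
        state = yield self.state_store.get_state_for_events(
            [event_id], state_filter=state_filter
        )
        return state[event_id]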
@@ -69,6 +69,8 @@ class PaginationHandler(object):
         self.hs = hs
         self.auth = hs.get_auth()
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
+        self.state_store = self.storage.state
         self.clock = hs.get_clock()
         self._server_name = hs.hostname

@@ -210,9 +212,10 @@ class PaginationHandler(object):
         source_config = pagin_config.get_source_config("room")

         with (yield self.pagination_lock.read(room_id)):
-            membership, member_event_id = yield self.auth.check_in_room_or_world_readable(
-                room_id, user_id
-            )
+            (
+                membership,
+                member_event_id,
+            ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)

             if source_config.direction == "b":
                 # if we're going backwards, we might need to backfill. This

@@ -255,7 +258,7 @@ class PaginationHandler(object):
             events = event_filter.filter(events)

         events = yield filter_events_for_client(
-            self.store, user_id, events, is_peeking=(member_event_id is None)
+            self.storage, user_id, events, is_peeking=(member_event_id is None)
         )

         if not events:

@@ -274,7 +277,7 @@ class PaginationHandler(object):
                 (EventTypes.Member, event.sender) for event in events
             )

-            state_ids = yield self.store.get_state_ids_for_event(
+            state_ids = yield self.state_store.get_state_ids_for_event(
                 events[0].event_id, state_filter=state_filter
             )

@@ -295,10 +298,8 @@ class PaginationHandler(object):
         }

         if state:
-            chunk["state"] = (
-                yield self._event_serializer.serialize_events(
-                    state, time_now, as_client_event=as_client_event
-                )
-            )
+            chunk["state"] = yield self._event_serializer.serialize_events(
+                state, time_now, as_client_event=as_client_event
+            )

         return chunk
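Several hunks here and below are pure formatter churn: when an unpacking assignment plus its call no longer fits the line limit, the formatter explodes the assignment targets instead of parenthesising the right-hand side. Both spellings are equivalent Python; a runnable before/after pair (the helper and its arguments are invented for illustration):

def _unpack_example():
    def check(room_id, user_id):
        return ("join", "$event:id")

    # Before: the right-hand side is parenthesised to stay under the limit
    membership, member_event_id = (
        check("!room:example.org", "@user:example.org")
    )

    # After: the style Black emits for long unpacking assignments
    (
        membership,
        member_event_id,
    ) = check("!room:example.org", "@user:example.org")

    return membership, member_event_id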
@@ -275,7 +275,7 @@ class BaseProfileHandler(BaseHandler):
                     ratelimit=False,  # Try to hide that these events aren't atomic.
                 )
             except Exception as e:
-                logger.warn(
+                logger.warning(
                     "Failed to update join event for room %s - %s", room_id, str(e)
                 )
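The many warn-to-warning hunks in this commit track a stdlib deprecation: Logger.warn is an undocumented alias that emits a DeprecationWarning on Python 3. A minimal demonstration:

import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("example")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    logger.warn("old spelling")         # deprecated alias
    logger.warning("current spelling")  # preferred

print([str(w.message) for w in caught])  # shows the DeprecationWarning from .warn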
@@ -15,8 +15,6 @@

 import logging

-from twisted.internet import defer
-
 from synapse.util.async_helpers import Linearizer

 from ._base import BaseHandler

@@ -32,8 +30,7 @@ class ReadMarkerHandler(BaseHandler):
         self.read_marker_linearizer = Linearizer(name="read_marker")
         self.notifier = hs.get_notifier()

-    @defer.inlineCallbacks
-    def received_client_read_marker(self, room_id, user_id, event_id):
+    async def received_client_read_marker(self, room_id, user_id, event_id):
         """Updates the read marker for a given user in a given room if the event ID given
         is ahead in the stream relative to the current read marker.

@@ -41,8 +38,8 @@ class ReadMarkerHandler(BaseHandler):
         the read marker has changed.
         """

-        with (yield self.read_marker_linearizer.queue((room_id, user_id))):
-            existing_read_marker = yield self.store.get_account_data_for_room_and_type(
+        with await self.read_marker_linearizer.queue((room_id, user_id)):
+            existing_read_marker = await self.store.get_account_data_for_room_and_type(
                 user_id, room_id, "m.fully_read"
             )

@@ -50,13 +47,13 @@ class ReadMarkerHandler(BaseHandler):

             if existing_read_marker:
                 # Only update if the new marker is ahead in the stream
-                should_update = yield self.store.is_event_after(
+                should_update = await self.store.is_event_after(
                     event_id, existing_read_marker["event_id"]
                 )

             if should_update:
                 content = {"event_id": event_id}
-                max_id = yield self.store.add_account_data_to_room(
+                max_id = await self.store.add_account_data_to_room(
                     user_id, room_id, "m.fully_read", content
                 )
                 self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
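This file is one of several converted from Twisted's generator-based coroutines to native async/await. The mechanical recipe, as a sketch (the store and its get method are hypothetical stand-ins):

from twisted.internet import defer

# Old style: a Deferred-returning generator
@defer.inlineCallbacks
def fetch_old(store, key):
    value = yield store.get(key)   # store.get returns a Deferred
    return value

# New style: a native coroutine; Twisted Deferreds can be awaited directly
async def fetch_new(store, key):
    value = await store.get(key)
    return value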
@@ -18,6 +18,7 @@ from twisted.internet import defer

 from synapse.handlers._base import BaseHandler
 from synapse.types import ReadReceipt, get_domain_from_id
+from synapse.util.async_helpers import maybe_awaitable

 logger = logging.getLogger(__name__)

@@ -36,8 +37,7 @@ class ReceiptsHandler(BaseHandler):
         self.clock = self.hs.get_clock()
         self.state = hs.get_state_handler()

-    @defer.inlineCallbacks
-    def _received_remote_receipt(self, origin, content):
+    async def _received_remote_receipt(self, origin, content):
         """Called when we receive an EDU of type m.receipt from a remote HS.
         """
         receipts = []

@@ -62,17 +62,16 @@ class ReceiptsHandler(BaseHandler):
                 )
             )

-        yield self._handle_new_receipts(receipts)
+        await self._handle_new_receipts(receipts)

-    @defer.inlineCallbacks
-    def _handle_new_receipts(self, receipts):
+    async def _handle_new_receipts(self, receipts):
         """Takes a list of receipts, stores them and informs the notifier.
         """
         min_batch_id = None
         max_batch_id = None

         for receipt in receipts:
-            res = yield self.store.insert_receipt(
+            res = await self.store.insert_receipt(
                 receipt.room_id,
                 receipt.receipt_type,
                 receipt.user_id,

@@ -99,14 +98,15 @@ class ReceiptsHandler(BaseHandler):

         self.notifier.on_new_event("receipt_key", max_batch_id, rooms=affected_room_ids)
         # Note that the min here shouldn't be relied upon to be accurate.
-        yield self.hs.get_pusherpool().on_new_receipts(
-            min_batch_id, max_batch_id, affected_room_ids
-        )
+        await maybe_awaitable(
+            self.hs.get_pusherpool().on_new_receipts(
+                min_batch_id, max_batch_id, affected_room_ids
+            )
+        )

         return True

-    @defer.inlineCallbacks
-    def received_client_receipt(self, room_id, receipt_type, user_id, event_id):
+    async def received_client_receipt(self, room_id, receipt_type, user_id, event_id):
         """Called when a client tells us a local user has read up to the given
         event_id in the room.
         """

@@ -118,24 +118,11 @@ class ReceiptsHandler(BaseHandler):
             data={"ts": int(self.clock.time_msec())},
         )

-        is_new = yield self._handle_new_receipts([receipt])
+        is_new = await self._handle_new_receipts([receipt])
         if not is_new:
             return

-        yield self.federation.send_read_receipt(receipt)
-
-    @defer.inlineCallbacks
-    def get_receipts_for_room(self, room_id, to_key):
-        """Gets all receipts for a room, upto the given key.
-        """
-        result = yield self.store.get_linearized_receipts_for_room(
-            room_id, to_key=to_key
-        )
-
-        if not result:
-            return []
-
-        return result
+        await self.federation.send_read_receipt(receipt)


 class ReceiptEventSource(object):
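maybe_awaitable bridges the two coroutine worlds during this migration: a caller that has gone async can await a callee that may still return a plain Deferred, or even a plain value. An illustrative sketch of the idea (the real helper lives in synapse.util.async_helpers; this is not its actual implementation):

import inspect

from twisted.internet import defer

def maybe_awaitable_sketch(value):
    # Deferreds (and coroutines) are awaitable as-is; anything else is
    # wrapped in an already-fired Deferred so callers can always `await`.
    if inspect.isawaitable(value):
        return value
    return defer.succeed(value)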
@@ -396,8 +396,8 @@ class RegistrationHandler(BaseHandler):
             room_id = room_identifier
         elif RoomAlias.is_valid(room_identifier):
             room_alias = RoomAlias.from_string(room_identifier)
-            room_id, remote_room_hosts = (
-                yield room_member_handler.lookup_room_alias(room_alias)
-            )
+            room_id, remote_room_hosts = yield room_member_handler.lookup_room_alias(
+                room_alias
+            )
             room_id = room_id.to_string()
         else:
@@ -129,6 +129,7 @@ class RoomCreationHandler(BaseHandler):
                 old_room_id,
                 new_version,  # args for _upgrade_room
             )
+
         return ret

     @defer.inlineCallbacks

@@ -147,21 +148,22 @@ class RoomCreationHandler(BaseHandler):

         # we create and auth the tombstone event before properly creating the new
         # room, to check our user has perms in the old room.
-        tombstone_event, tombstone_context = (
-            yield self.event_creation_handler.create_event(
-                requester,
-                {
-                    "type": EventTypes.Tombstone,
-                    "state_key": "",
-                    "room_id": old_room_id,
-                    "sender": user_id,
-                    "content": {
-                        "body": "This room has been replaced",
-                        "replacement_room": new_room_id,
-                    },
-                },
-                token_id=requester.access_token_id,
-            )
-        )
+        (
+            tombstone_event,
+            tombstone_context,
+        ) = yield self.event_creation_handler.create_event(
+            requester,
+            {
+                "type": EventTypes.Tombstone,
+                "state_key": "",
+                "room_id": old_room_id,
+                "sender": user_id,
+                "content": {
+                    "body": "This room has been replaced",
+                    "replacement_room": new_room_id,
+                },
+            },
+            token_id=requester.access_token_id,
+        )
         old_room_version = yield self.store.get_room_version(old_room_id)
         yield self.auth.check_from_context(

@@ -188,7 +190,12 @@ class RoomCreationHandler(BaseHandler):
             requester, old_room_id, new_room_id, old_room_state
         )

-        # and finally, shut down the PLs in the old room, and update them in the new
+        # Copy over user push rules, tags and migrate room directory state
+        yield self.room_member_handler.transfer_room_state_on_room_upgrade(
+            old_room_id, new_room_id
+        )
+
+        # finally, shut down the PLs in the old room, and update them in the new
         # room.
         yield self._update_upgraded_room_pls(
             requester, old_room_id, new_room_id, old_room_state

@@ -822,6 +829,8 @@ class RoomContextHandler(object):
     def __init__(self, hs):
         self.hs = hs
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
+        self.state_store = self.storage.state

     @defer.inlineCallbacks
     def get_event_context(self, user, room_id, event_id, limit, event_filter):

@@ -848,7 +857,7 @@ class RoomContextHandler(object):

         def filter_evts(events):
             return filter_events_for_client(
-                self.store, user.to_string(), events, is_peeking=is_peeking
+                self.storage, user.to_string(), events, is_peeking=is_peeking
             )

         event = yield self.store.get_event(

@@ -890,7 +899,7 @@ class RoomContextHandler(object):
         # first? Shouldn't we be consistent with /sync?
         # https://github.com/matrix-org/matrix-doc/issues/687

-        state = yield self.store.get_state_for_events(
+        state = yield self.state_store.get_state_for_events(
             [last_event_id], state_filter=state_filter
         )
         results["state"] = list(state[last_event_id].values())

@@ -922,7 +931,7 @@ class RoomEventSource(object):

             from_token = RoomStreamToken.parse(from_key)
             if from_token.topological:
-                logger.warn("Stream has topological part!!!! %r", from_key)
+                logger.warning("Stream has topological part!!!! %r", from_key)
                 from_key = "s%s" % (from_token.stream,)

         app_service = self.store.get_app_service_by_user_id(user.to_string())
@@ -203,10 +203,6 @@ class RoomMemberHandler(object):
                 prev_member_event = yield self.store.get_event(prev_member_event_id)
                 newly_joined = prev_member_event.membership != Membership.JOIN
             if newly_joined:
-                # Copy over user state if we're joining an upgraded room
-                yield self.copy_user_state_if_room_upgrade(
-                    room_id, requester.user.to_string()
-                )
                 yield self._user_joined_room(target, room_id)
         elif event.membership == Membership.LEAVE:
             if prev_member_event_id:

@@ -455,11 +451,6 @@ class RoomMemberHandler(object):
                 requester, remote_room_hosts, room_id, target, content
             )

-            # Copy over user state if this is a join on an remote upgraded room
-            yield self.copy_user_state_if_room_upgrade(
-                room_id, requester.user.to_string()
-            )
-
             return remote_join_response

         elif effective_membership_state == Membership.LEAVE:

@@ -498,36 +489,72 @@ class RoomMemberHandler(object):
         return res

     @defer.inlineCallbacks
-    def copy_user_state_if_room_upgrade(self, new_room_id, user_id):
-        """Copy user-specific information when they join a new room if that new room is the
-        result of a room upgrade
+    def transfer_room_state_on_room_upgrade(self, old_room_id, room_id):
+        """Upon our server becoming aware of an upgraded room, either by upgrading a room
+        ourselves or joining one, we can transfer over information from the previous room.
+
+        Copies user state (tags/push rules) for every local user that was in the old room, as
+        well as migrating the room directory state.

         Args:
-            new_room_id (str): The ID of the room the user is joining
-            user_id (str): The ID of the user
+            old_room_id (str): The ID of the old room
+
+            room_id (str): The ID of the new room

         Returns:
             Deferred
         """
-        # Check if the new room is an upgraded room
-        predecessor = yield self.store.get_room_predecessor(new_room_id)
-        if not predecessor:
-            return
+        # Find all local users that were in the old room and copy over each user's state
+        users = yield self.store.get_users_in_room(old_room_id)
+        yield self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
+
+        # Add new room to the room directory if the old room was there
+        # Remove old room from the room directory
+        old_room = yield self.store.get_room(old_room_id)
+        if old_room and old_room["is_public"]:
+            yield self.store.set_room_is_public(old_room_id, False)
+            yield self.store.set_room_is_public(room_id, True)
+
+    @defer.inlineCallbacks
+    def copy_user_state_on_room_upgrade(self, old_room_id, new_room_id, user_ids):
+        """Copy user-specific information when they join a new room when that new room is the
+        result of a room upgrade
+
+        Args:
+            old_room_id (str): The ID of upgraded room
+            new_room_id (str): The ID of the new room
+            user_ids (Iterable[str]): User IDs to copy state for
+
+        Returns:
+            Deferred
+        """

         logger.debug(
-            "Found predecessor for %s: %s. Copying over room tags and push " "rules",
-            new_room_id,
-            predecessor,
+            "Copying over room tags and push rules from %s to %s for users %s",
+            old_room_id,
+            new_room_id,
+            user_ids,
         )

-        # It is an upgraded room. Copy over old tags
-        yield self.copy_room_tags_and_direct_to_room(
-            predecessor["room_id"], new_room_id, user_id
-        )
-        # Copy over push rules
-        yield self.store.copy_push_rules_from_room_to_room_for_user(
-            predecessor["room_id"], new_room_id, user_id
-        )
+        for user_id in user_ids:
+            try:
+                # It is an upgraded room. Copy over old tags
+                yield self.copy_room_tags_and_direct_to_room(
+                    old_room_id, new_room_id, user_id
+                )
+                # Copy over push rules
+                yield self.store.copy_push_rules_from_room_to_room_for_user(
+                    old_room_id, new_room_id, user_id
+                )
+            except Exception:
+                logger.exception(
+                    "Error copying tags and/or push rules from rooms %s to %s for user %s. "
+                    "Skipping...",
+                    old_room_id,
+                    new_room_id,
+                    user_id,
+                )
+                continue

     @defer.inlineCallbacks
     def send_membership_event(self, requester, event, context, ratelimit=True):

@@ -759,22 +786,25 @@ class RoomMemberHandler(object):
         if room_avatar_event:
             room_avatar_url = room_avatar_event.content.get("url", "")

-        token, public_keys, fallback_public_key, display_name = (
-            yield self.identity_handler.ask_id_server_for_third_party_invite(
-                requester=requester,
-                id_server=id_server,
-                medium=medium,
-                address=address,
-                room_id=room_id,
-                inviter_user_id=user.to_string(),
-                room_alias=canonical_room_alias,
-                room_avatar_url=room_avatar_url,
-                room_join_rules=room_join_rules,
-                room_name=room_name,
-                inviter_display_name=inviter_display_name,
-                inviter_avatar_url=inviter_avatar_url,
-                id_access_token=id_access_token,
-            )
-        )
+        (
+            token,
+            public_keys,
+            fallback_public_key,
+            display_name,
+        ) = yield self.identity_handler.ask_id_server_for_third_party_invite(
+            requester=requester,
+            id_server=id_server,
+            medium=medium,
+            address=address,
+            room_id=room_id,
+            inviter_user_id=user.to_string(),
+            room_alias=canonical_room_alias,
+            room_avatar_url=room_avatar_url,
+            room_join_rules=room_join_rules,
+            room_name=room_name,
+            inviter_display_name=inviter_display_name,
+            inviter_avatar_url=inviter_avatar_url,
+            id_access_token=id_access_token,
+        )

         yield self.event_creation_handler.create_and_send_nonmember_event(
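The net effect of the room-member hunks: per-user state transfer no longer piggybacks on each individual join, it happens once when the server learns of the upgrade. A hedged sketch of the resulting call, assuming only the handler method added above:

from twisted.internet import defer

@defer.inlineCallbacks
def _after_upgrade_sketch(room_member_handler, old_room_id, new_room_id):
    # Called once per upgrade. Internally this copies tags and push rules for
    # every local user who was in the old room, and moves the public-directory
    # entry from the old room to the new one.
    yield room_member_handler.transfer_room_state_on_room_upgrade(
        old_room_id, new_room_id
    )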
@@ -35,6 +35,8 @@ class SearchHandler(BaseHandler):
     def __init__(self, hs):
         super(SearchHandler, self).__init__(hs)
         self._event_serializer = hs.get_event_client_serializer()
+        self.storage = hs.get_storage()
+        self.state_store = self.storage.state

     @defer.inlineCallbacks
     def get_old_rooms_from_upgraded_room(self, room_id):

@@ -221,7 +223,7 @@ class SearchHandler(BaseHandler):
             filtered_events = search_filter.filter([r["event"] for r in results])

             events = yield filter_events_for_client(
-                self.store, user.to_string(), filtered_events
+                self.storage, user.to_string(), filtered_events
             )

             events.sort(key=lambda e: -rank_map[e.event_id])

@@ -271,7 +273,7 @@ class SearchHandler(BaseHandler):
                 filtered_events = search_filter.filter([r["event"] for r in results])

                 events = yield filter_events_for_client(
-                    self.store, user.to_string(), filtered_events
+                    self.storage, user.to_string(), filtered_events
                 )

                 room_events.extend(events)

@@ -340,11 +342,11 @@ class SearchHandler(BaseHandler):
                 )

                 res["events_before"] = yield filter_events_for_client(
-                    self.store, user.to_string(), res["events_before"]
+                    self.storage, user.to_string(), res["events_before"]
                 )

                 res["events_after"] = yield filter_events_for_client(
-                    self.store, user.to_string(), res["events_after"]
+                    self.storage, user.to_string(), res["events_after"]
                 )

                 res["start"] = now_token.copy_and_replace(

@@ -372,7 +374,7 @@ class SearchHandler(BaseHandler):
                     [(EventTypes.Member, sender) for sender in senders]
                 )

-                state = yield self.store.get_state_for_event(
+                state = yield self.state_store.get_state_for_event(
                     last_event_id, state_filter
                 )

@@ -394,15 +396,11 @@ class SearchHandler(BaseHandler):
         time_now = self.clock.time_msec()

         for context in contexts.values():
-            context["events_before"] = (
-                yield self._event_serializer.serialize_events(
-                    context["events_before"], time_now
-                )
-            )
+            context["events_before"] = yield self._event_serializer.serialize_events(
+                context["events_before"], time_now
+            )
-            context["events_after"] = (
-                yield self._event_serializer.serialize_events(
-                    context["events_after"], time_now
-                )
-            )
+            context["events_after"] = yield self._event_serializer.serialize_events(
+                context["events_after"], time_now
+            )

         state_results = {}
@@ -108,7 +108,10 @@ class StatsHandler(StateDeltasHandler):
             user_deltas = {}

             # Then count deltas for total_events and total_event_bytes.
-            room_count, user_count = yield self.store.get_changes_room_total_events_and_bytes(
+            (
+                room_count,
+                user_count,
+            ) = yield self.store.get_changes_room_total_events_and_bytes(
                 self.pos, max_pos
             )
@@ -230,6 +230,8 @@ class SyncHandler(object):
         self.response_cache = ResponseCache(hs, "sync")
         self.state = hs.get_state_handler()
         self.auth = hs.get_auth()
+        self.storage = hs.get_storage()
+        self.state_store = self.storage.state

         # ExpiringCache((User, Device)) -> LruCache(state_key => event_id)
         self.lazy_loaded_members_cache = ExpiringCache(

@@ -417,7 +419,7 @@ class SyncHandler(object):
             current_state_ids = frozenset(itervalues(current_state_ids))

         recents = yield filter_events_for_client(
-            self.store,
+            self.storage,
             sync_config.user.to_string(),
             recents,
             always_include_ids=current_state_ids,

@@ -470,7 +472,7 @@ class SyncHandler(object):
                 current_state_ids = frozenset(itervalues(current_state_ids))

             loaded_recents = yield filter_events_for_client(
-                self.store,
+                self.storage,
                 sync_config.user.to_string(),
                 loaded_recents,
                 always_include_ids=current_state_ids,

@@ -509,7 +511,7 @@ class SyncHandler(object):
         Returns:
             A Deferred map from ((type, state_key)->Event)
         """
-        state_ids = yield self.store.get_state_ids_for_event(
+        state_ids = yield self.state_store.get_state_ids_for_event(
            event.event_id, state_filter=state_filter
        )
        if event.is_state():

@@ -580,7 +582,7 @@ class SyncHandler(object):
             return None

         last_event = last_events[-1]
-        state_ids = yield self.store.get_state_ids_for_event(
+        state_ids = yield self.state_store.get_state_ids_for_event(
             last_event.event_id,
             state_filter=StateFilter.from_types(
                 [(EventTypes.Name, ""), (EventTypes.CanonicalAlias, "")]

@@ -757,11 +759,11 @@ class SyncHandler(object):

         if full_state:
             if batch:
-                current_state_ids = yield self.store.get_state_ids_for_event(
+                current_state_ids = yield self.state_store.get_state_ids_for_event(
                     batch.events[-1].event_id, state_filter=state_filter
                 )

-                state_ids = yield self.store.get_state_ids_for_event(
+                state_ids = yield self.state_store.get_state_ids_for_event(
                     batch.events[0].event_id, state_filter=state_filter
                 )

@@ -781,7 +783,7 @@ class SyncHandler(object):
                 )
         elif batch.limited:
             if batch:
-                state_at_timeline_start = yield self.store.get_state_ids_for_event(
+                state_at_timeline_start = yield self.state_store.get_state_ids_for_event(
                     batch.events[0].event_id, state_filter=state_filter
                 )
             else:

@@ -810,7 +812,7 @@ class SyncHandler(object):
             )

             if batch:
-                current_state_ids = yield self.store.get_state_ids_for_event(
+                current_state_ids = yield self.state_store.get_state_ids_for_event(
                     batch.events[-1].event_id, state_filter=state_filter
                 )
             else:

@@ -841,7 +843,7 @@ class SyncHandler(object):
             # So we fish out all the member events corresponding to the
             # timeline here, and then dedupe any redundant ones below.

-            state_ids = yield self.store.get_state_ids_for_event(
+            state_ids = yield self.state_store.get_state_ids_for_event(
                 batch.events[0].event_id,
                 # we only want members!
                 state_filter=StateFilter.from_types(

@@ -1204,10 +1206,11 @@ class SyncHandler(object):
         since_token = sync_result_builder.since_token

         if since_token and not sync_result_builder.full_state:
-            account_data, account_data_by_room = (
-                yield self.store.get_updated_account_data_for_user(
-                    user_id, since_token.account_data_key
-                )
-            )
+            (
+                account_data,
+                account_data_by_room,
+            ) = yield self.store.get_updated_account_data_for_user(
+                user_id, since_token.account_data_key
+            )

             push_rules_changed = yield self.store.have_push_rules_changed_for_user(

@@ -1219,9 +1222,10 @@ class SyncHandler(object):
                     sync_config.user
                 )
         else:
-            account_data, account_data_by_room = (
-                yield self.store.get_account_data_for_user(sync_config.user.to_string())
-            )
+            (
+                account_data,
+                account_data_by_room,
+            ) = yield self.store.get_account_data_for_user(sync_config.user.to_string())

             account_data["m.push_rules"] = yield self.push_rules_for_user(
                 sync_config.user
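Throughout the sync hunks, get_state_ids_for_event is paired with StateFilter.from_types so the state store fetches only the state keys a caller needs rather than the room's full state dict. A minimal sketch of that pattern, reusing only identifiers that appear in the diff:

from twisted.internet import defer

from synapse.api.constants import EventTypes
from synapse.storage.state import StateFilter

@defer.inlineCallbacks
def room_name_state_sketch(state_store, event_id):
    # Ask for just the room-name and canonical-alias entries at this event.
    state_ids = yield state_store.get_state_ids_for_event(
        event_id,
        state_filter=StateFilter.from_types(
            [(EventTypes.Name, ""), (EventTypes.CanonicalAlias, "")]
        ),
    )
    return state_ids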
@@ -81,7 +81,7 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):
     def __init__(self, hs):
         super().__init__(hs)
         self._enabled = bool(hs.config.recaptcha_private_key)
-        self._http_client = hs.get_simple_http_client()
+        self._http_client = hs.get_proxied_http_client()
         self._url = hs.config.recaptcha_siteverify_api
         self._secret = hs.config.recaptcha_private_key
@@ -45,6 +45,7 @@ from synapse.http import (
     cancelled_to_request_timed_out_error,
     redact_uri,
 )
+from synapse.http.proxyagent import ProxyAgent
 from synapse.logging.context import make_deferred_yieldable
 from synapse.logging.opentracing import set_tag, start_active_span, tags
 from synapse.util.async_helpers import timeout_deferred

@@ -183,7 +184,15 @@ class SimpleHttpClient(object):
     using HTTP in Matrix
     """

-    def __init__(self, hs, treq_args={}, ip_whitelist=None, ip_blacklist=None):
+    def __init__(
+        self,
+        hs,
+        treq_args={},
+        ip_whitelist=None,
+        ip_blacklist=None,
+        http_proxy=None,
+        https_proxy=None,
+    ):
         """
         Args:
             hs (synapse.server.HomeServer)

@@ -192,6 +201,8 @@ class SimpleHttpClient(object):
                 we may not request.
             ip_whitelist (netaddr.IPSet): The whitelisted IP addresses, that we can
                 request if it were otherwise caught in a blacklist.
+            http_proxy (bytes): proxy server to use for http connections. host[:port]
+            https_proxy (bytes): proxy server to use for https connections. host[:port]
         """
         self.hs = hs

@@ -236,11 +247,13 @@ class SimpleHttpClient(object):
         # The default context factory in Twisted 14.0.0 (which we require) is
         # BrowserLikePolicyForHTTPS which will do regular cert validation
         # 'like a browser'
-        self.agent = Agent(
+        self.agent = ProxyAgent(
             self.reactor,
             connectTimeout=15,
             contextFactory=self.hs.get_http_client_context_factory(),
             pool=pool,
+            http_proxy=http_proxy,
+            https_proxy=https_proxy,
         )

         if self._ip_blacklist:

@@ -535,7 +548,7 @@ class SimpleHttpClient(object):
             b"Content-Length" in resp_headers
             and int(resp_headers[b"Content-Length"][0]) > max_size
         ):
-            logger.warn("Requested URL is too large > %r bytes" % (self.max_size,))
+            logger.warning("Requested URL is too large > %r bytes" % (self.max_size,))
             raise SynapseError(
                 502,
                 "Requested file is too large > %r bytes" % (self.max_size,),

@@ -543,7 +556,7 @@ class SimpleHttpClient(object):
         )

         if response.code > 299:
-            logger.warn("Got %d when downloading %s" % (response.code, url))
+            logger.warning("Got %d when downloading %s" % (response.code, url))
             raise SynapseError(502, "Got error %d" % (response.code,), Codes.UNKNOWN)

         # TODO: if our Content-Type is HTML or something, just read the first
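The hunks above only add the two proxy arguments to SimpleHttpClient and thread them into ProxyAgent; how callers source the values is not shown here (the recaptcha and pusher hunks go through hs.get_proxied_http_client()). A hedged construction sketch, where reading from the process environment is an assumption of this sketch:

import os

from synapse.http.client import SimpleHttpClient

def build_proxied_client_sketch(hs):
    # Hypothetical wiring: where the proxy values come from is an assumption.
    http_proxy = os.environ.get("http_proxy")
    https_proxy = os.environ.get("https_proxy")
    return SimpleHttpClient(
        hs,
        http_proxy=http_proxy.encode("ascii") if http_proxy else None,
        https_proxy=https_proxy.encode("ascii") if https_proxy else None,
    )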
synapse/http/connectproxyclient.py (new file, 195 lines)

@@ -0,0 +1,195 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from zope.interface import implementer

from twisted.internet import defer, protocol
from twisted.internet.error import ConnectError
from twisted.internet.interfaces import IStreamClientEndpoint
from twisted.internet.protocol import connectionDone
from twisted.web import http

logger = logging.getLogger(__name__)


class ProxyConnectError(ConnectError):
    pass


@implementer(IStreamClientEndpoint)
class HTTPConnectProxyEndpoint(object):
    """An Endpoint implementation which will send a CONNECT request to an http proxy

    Wraps an existing HostnameEndpoint for the proxy.

    When we get the connect() request from the connection pool (via the TLS wrapper),
    we'll first connect to the proxy endpoint with a ProtocolFactory which will make the
    CONNECT request. Once that completes, we invoke the protocolFactory which was passed
    in.

    Args:
        reactor: the Twisted reactor to use for the connection
        proxy_endpoint (IStreamClientEndpoint): the endpoint to use to connect to the
            proxy
        host (bytes): hostname that we want to CONNECT to
        port (int): port that we want to connect to
    """

    def __init__(self, reactor, proxy_endpoint, host, port):
        self._reactor = reactor
        self._proxy_endpoint = proxy_endpoint
        self._host = host
        self._port = port

    def __repr__(self):
        return "<HTTPConnectProxyEndpoint %s>" % (self._proxy_endpoint,)

    def connect(self, protocolFactory):
        f = HTTPProxiedClientFactory(self._host, self._port, protocolFactory)
        d = self._proxy_endpoint.connect(f)
        # once the tcp socket connects successfully, we need to wait for the
        # CONNECT to complete.
        d.addCallback(lambda conn: f.on_connection)
        return d


class HTTPProxiedClientFactory(protocol.ClientFactory):
    """ClientFactory wrapper that triggers an HTTP proxy CONNECT on connect.

    Once the CONNECT completes, invokes the original ClientFactory to build the
    HTTP Protocol object and run the rest of the connection.

    Args:
        dst_host (bytes): hostname that we want to CONNECT to
        dst_port (int): port that we want to connect to
        wrapped_factory (protocol.ClientFactory): The original Factory
    """

    def __init__(self, dst_host, dst_port, wrapped_factory):
        self.dst_host = dst_host
        self.dst_port = dst_port
        self.wrapped_factory = wrapped_factory
        self.on_connection = defer.Deferred()

    def startedConnecting(self, connector):
        return self.wrapped_factory.startedConnecting(connector)

    def buildProtocol(self, addr):
        wrapped_protocol = self.wrapped_factory.buildProtocol(addr)

        return HTTPConnectProtocol(
            self.dst_host, self.dst_port, wrapped_protocol, self.on_connection
        )

    def clientConnectionFailed(self, connector, reason):
        logger.debug("Connection to proxy failed: %s", reason)
        if not self.on_connection.called:
            self.on_connection.errback(reason)
        return self.wrapped_factory.clientConnectionFailed(connector, reason)

    def clientConnectionLost(self, connector, reason):
        logger.debug("Connection to proxy lost: %s", reason)
        if not self.on_connection.called:
            self.on_connection.errback(reason)
        return self.wrapped_factory.clientConnectionLost(connector, reason)


class HTTPConnectProtocol(protocol.Protocol):
    """Protocol that wraps an existing Protocol to do a CONNECT handshake at connect

    Args:
        host (bytes): The original HTTP(s) hostname or IPv4 or IPv6 address literal
            to put in the CONNECT request

        port (int): The original HTTP(s) port to put in the CONNECT request

        wrapped_protocol (interfaces.IProtocol): the original protocol (probably
            HTTPChannel or TLSMemoryBIOProtocol, but could be anything really)

        connected_deferred (Deferred): a Deferred which will be callbacked with
            wrapped_protocol when the CONNECT completes
    """

    def __init__(self, host, port, wrapped_protocol, connected_deferred):
        self.host = host
        self.port = port
        self.wrapped_protocol = wrapped_protocol
        self.connected_deferred = connected_deferred
        self.http_setup_client = HTTPConnectSetupClient(self.host, self.port)
        self.http_setup_client.on_connected.addCallback(self.proxyConnected)

    def connectionMade(self):
        self.http_setup_client.makeConnection(self.transport)

    def connectionLost(self, reason=connectionDone):
        if self.wrapped_protocol.connected:
            self.wrapped_protocol.connectionLost(reason)

        self.http_setup_client.connectionLost(reason)

        if not self.connected_deferred.called:
            self.connected_deferred.errback(reason)

    def proxyConnected(self, _):
        self.wrapped_protocol.makeConnection(self.transport)

        self.connected_deferred.callback(self.wrapped_protocol)

        # Get any pending data from the http buf and forward it to the original protocol
        buf = self.http_setup_client.clearLineBuffer()
        if buf:
            self.wrapped_protocol.dataReceived(buf)

    def dataReceived(self, data):
        # if we've set up the HTTP protocol, we can send the data there
        if self.wrapped_protocol.connected:
            return self.wrapped_protocol.dataReceived(data)

        # otherwise, we must still be setting up the connection: send the data to the
        # setup client
        return self.http_setup_client.dataReceived(data)


class HTTPConnectSetupClient(http.HTTPClient):
    """HTTPClient protocol to send a CONNECT message for proxies and read the response.

    Args:
        host (bytes): The hostname to send in the CONNECT message
        port (int): The port to send in the CONNECT message
    """

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.on_connected = defer.Deferred()

    def connectionMade(self):
        logger.debug("Connected to proxy, sending CONNECT")
        self.sendCommand(b"CONNECT", b"%s:%d" % (self.host, self.port))
        self.endHeaders()

    def handleStatus(self, version, status, message):
        logger.debug("Got Status: %s %s %s", status, message, version)
        if status != b"200":
            raise ProxyConnectError("Unexpected status on CONNECT: %s" % status)

    def handleEndHeaders(self):
        logger.debug("End Headers")
        self.on_connected.callback(None)

    def handleResponse(self, body):
        pass
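For reference, the wire exchange this endpoint automates: the client asks the proxy to open a raw tunnel, and only after the proxy's 200 response does the real (usually TLS) protocol start. A standalone sketch using only the standard library, with all hosts and ports as placeholders:

import socket

def connect_tunnel_sketch(proxy_host, proxy_port, dst_host, dst_port):
    # Open a TCP connection to the proxy and issue the CONNECT handshake;
    # returns the raw socket once the tunnel is established.
    sock = socket.create_connection((proxy_host, proxy_port))
    request = "CONNECT %s:%d HTTP/1.0\r\n\r\n" % (dst_host, dst_port)
    sock.sendall(request.encode("ascii"))

    # Read the proxy's reply up to the blank line that ends the headers.
    response = b""
    while b"\r\n\r\n" not in response:
        chunk = sock.recv(4096)
        if not chunk:
            break
        response += chunk

    status_line = response.split(b"\r\n", 1)[0]
    if not status_line.startswith((b"HTTP/1.0 200", b"HTTP/1.1 200")):
        sock.close()
        raise RuntimeError("CONNECT failed: %r" % (status_line,))
    return sock  # the caller now speaks TLS/HTTP through the tunnel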
@@ -148,7 +148,7 @@ class SrvResolver(object):
             # Try something in the cache, else rereaise
             cache_entry = self._cache.get(service_name, None)
             if cache_entry:
-                logger.warn(
+                logger.warning(
                     "Failed to resolve %r, falling back to cache. %r", service_name, e
                 )
                 return list(cache_entry)

@@ -149,7 +149,7 @@ def _handle_json_response(reactor, timeout_sec, request, response):

         body = yield make_deferred_yieldable(d)
     except Exception as e:
-        logger.warn(
+        logger.warning(
             "{%s} [%s] Error reading response: %s",
             request.txn_id,
             request.destination,

@@ -457,7 +457,7 @@ class MatrixFederationHttpClient(object):
                     except Exception as e:
                         # Eh, we're already going to raise an exception so lets
                         # ignore if this fails.
-                        logger.warn(
+                        logger.warning(
                             "{%s} [%s] Failed to get error response: %s %s: %s",
                             request.txn_id,
                             request.destination,

@@ -478,7 +478,7 @@ class MatrixFederationHttpClient(object):

                 break
             except RequestSendFailed as e:
-                logger.warn(
+                logger.warning(
                     "{%s} [%s] Request failed: %s %s: %s",
                     request.txn_id,
                     request.destination,

@@ -513,7 +513,7 @@ class MatrixFederationHttpClient(object):
                     raise

             except Exception as e:
-                logger.warn(
+                logger.warning(
                     "{%s} [%s] Request failed: %s %s: %s",
                     request.txn_id,
                     request.destination,

@@ -889,7 +889,7 @@ class MatrixFederationHttpClient(object):
             d.addTimeout(self.default_timeout, self.reactor)
             length = yield make_deferred_yieldable(d)
         except Exception as e:
-            logger.warn(
+            logger.warning(
                 "{%s} [%s] Error reading response: %s",
                 request.txn_id,
                 request.destination,
synapse/http/proxyagent.py (new file, 195 lines)

@@ -0,0 +1,195 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re

from zope.interface import implementer

from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.python.failure import Failure
from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase
from twisted.web.error import SchemeNotSupported
from twisted.web.iweb import IAgent

from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint

logger = logging.getLogger(__name__)

_VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")


@implementer(IAgent)
class ProxyAgent(_AgentBase):
    """An Agent implementation which will use an HTTP proxy if one was requested

    Args:
        reactor: twisted reactor to place outgoing
            connections.

        contextFactory (IPolicyForHTTPS): A factory for TLS contexts, to control the
            verification parameters of OpenSSL.  The default is to use a
            `BrowserLikePolicyForHTTPS`, so unless you have special
            requirements you can leave this as-is.

        connectTimeout (float): The amount of time that this Agent will wait
            for the peer to accept a connection.

        bindAddress (bytes): The local address for client sockets to bind to.

        pool (HTTPConnectionPool|None): connection pool to be used. If None, a
            non-persistent pool instance will be created.
    """

    def __init__(
        self,
        reactor,
        contextFactory=BrowserLikePolicyForHTTPS(),
        connectTimeout=None,
        bindAddress=None,
        pool=None,
        http_proxy=None,
        https_proxy=None,
    ):
        _AgentBase.__init__(self, reactor, pool)

        self._endpoint_kwargs = {}
        if connectTimeout is not None:
            self._endpoint_kwargs["timeout"] = connectTimeout
        if bindAddress is not None:
            self._endpoint_kwargs["bindAddress"] = bindAddress

        self.http_proxy_endpoint = _http_proxy_endpoint(
            http_proxy, reactor, **self._endpoint_kwargs
        )

        self.https_proxy_endpoint = _http_proxy_endpoint(
            https_proxy, reactor, **self._endpoint_kwargs
        )

        self._policy_for_https = contextFactory

        self._reactor = reactor

    def request(self, method, uri, headers=None, bodyProducer=None):
        """
        Issue a request to the server indicated by the given uri.

        Supports `http` and `https` schemes.

        An existing connection from the connection pool may be used or a new one may be
        created.

        See also: twisted.web.iweb.IAgent.request

        Args:
            method (bytes): The request method to use, such as `GET`, `POST`, etc

            uri (bytes): The location of the resource to request.

            headers (Headers|None): Extra headers to send with the request

            bodyProducer (IBodyProducer|None): An object which can generate bytes to
                make up the body of this request (for example, the properly encoded
                contents of a file for a file upload). Or, None if the request is to
                have no body.

        Returns:
            Deferred[IResponse]: completes when the header of the response has
                been received (regardless of the response status code).
        """
        uri = uri.strip()
        if not _VALID_URI.match(uri):
            raise ValueError("Invalid URI {!r}".format(uri))

        parsed_uri = URI.fromBytes(uri)
        pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
        request_path = parsed_uri.originForm

        if parsed_uri.scheme == b"http" and self.http_proxy_endpoint:
            # Cache *all* connections under the same key, since we are only
            # connecting to a single destination, the proxy:
            pool_key = ("http-proxy", self.http_proxy_endpoint)
            endpoint = self.http_proxy_endpoint
            request_path = uri
        elif parsed_uri.scheme == b"https" and self.https_proxy_endpoint:
            endpoint = HTTPConnectProxyEndpoint(
                self._reactor,
                self.https_proxy_endpoint,
                parsed_uri.host,
                parsed_uri.port,
            )
        else:
            # not using a proxy
            endpoint = HostnameEndpoint(
                self._reactor, parsed_uri.host, parsed_uri.port, **self._endpoint_kwargs
            )

        logger.debug("Requesting %s via %s", uri, endpoint)

        if parsed_uri.scheme == b"https":
            tls_connection_creator = self._policy_for_https.creatorForNetloc(
                parsed_uri.host, parsed_uri.port
            )
            endpoint = wrapClientTLS(tls_connection_creator, endpoint)
        elif parsed_uri.scheme == b"http":
            pass
        else:
            return defer.fail(
                Failure(
                    SchemeNotSupported("Unsupported scheme: %r" % (parsed_uri.scheme,))
                )
            )

        return self._requestWithEndpoint(
            pool_key, endpoint, method, parsed_uri, headers, bodyProducer, request_path
        )


def _http_proxy_endpoint(proxy, reactor, **kwargs):
    """Parses an http proxy setting and returns an endpoint for the proxy

    Args:
        proxy (bytes|None): the proxy setting
        reactor: reactor to be used to connect to the proxy
        kwargs: other args to be passed to HostnameEndpoint

    Returns:
        interfaces.IStreamClientEndpoint|None: endpoint to use to connect to the proxy,
            or None
    """
    if proxy is None:
        return None

    # currently we only support hostname:port. Some apps also support
    # protocol://<host>[:port], which allows a way of requiring a TLS connection to the
    # proxy.

    host, port = parse_host_port(proxy, default_port=1080)
    return HostnameEndpoint(reactor, host, port, **kwargs)


def parse_host_port(hostport, default_port=None):
    # could have sworn we had one of these somewhere else...
    if b":" in hostport:
        host, port = hostport.rsplit(b":", 1)
        try:
            port = int(port)
            return host, port
        except ValueError:
            # the thing after the : wasn't a valid port; presumably this is an
            # IPv6 address.
            pass

    return hostport, default_port
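A usage sketch for the new agent, assuming a running Twisted reactor; the proxy address and target URL are placeholders. Plain-http requests are rewritten to absolute-URI form and sent straight to the proxy, while https requests are tunnelled through it with CONNECT, as implemented above:

from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.web.client import readBody

from synapse.http.proxyagent import ProxyAgent

@inlineCallbacks
def fetch_via_proxy():
    agent = ProxyAgent(
        reactor,
        http_proxy=b"proxy.example.com:8888",
        https_proxy=b"proxy.example.com:8888",
    )
    response = yield agent.request(b"GET", b"https://matrix.org/")
    body = yield readBody(response)
    return body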
@@ -170,7 +170,7 @@ class RequestMetrics(object):
             tag = context.tag

             if context != self.start_context:
-                logger.warn(
+                logger.warning(
                     "Context have unexpectedly changed %r, %r",
                     context,
                     self.start_context,
@@ -454,7 +454,7 @@ def respond_with_json(
     # the Deferred fires, but since the flag is RIGHT THERE it seems like
     # a waste.
     if request._disconnected:
-        logger.warn(
+        logger.warning(
             "Not sending response to request %s, already disconnected.", request
         )
         return

@@ -219,13 +219,13 @@ def parse_json_value_from_request(request, allow_empty_body=False):
     try:
         content_unicode = content_bytes.decode("utf8")
     except UnicodeDecodeError:
-        logger.warn("Unable to decode UTF-8")
+        logger.warning("Unable to decode UTF-8")
         raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)

     try:
         content = json.loads(content_unicode)
     except Exception as e:
-        logger.warn("Unable to parse JSON: %s", e)
+        logger.warning("Unable to parse JSON: %s", e)
         raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)

     return content

@@ -199,7 +199,7 @@ class SynapseRequest(Request):
             # It's useful to log it here so that we can get an idea of when
             # the client disconnects.
             with PreserveLoggingContext(self.logcontext):
-                logger.warn(
+                logger.warning(
                     "Error processing request %r: %s %s", self, reason.type, reason.value
                 )

@@ -305,7 +305,7 @@ class SynapseRequest(Request):
         try:
             self.request_metrics.stop(self.finish_time, self.code, self.sentLength)
         except Exception as e:
-            logger.warn("Failed to stop metrics: %r", e)
+            logger.warning("Failed to stop metrics: %r", e)


 class XForwardedForRequest(SynapseRequest):
@@ -185,7 +185,7 @@ DEFAULT_LOGGERS = {"synapse": {"level": "INFO"}}


 def parse_drain_configs(
-    drains: dict
+    drains: dict,
 ) -> typing.Generator[DrainConfiguration, None, None]:
     """
     Parse the drain configurations.

@@ -294,7 +294,7 @@ class LoggingContext(object):
         """Enters this logging context into thread local storage"""
         old_context = self.set_current_context(self)
         if self.previous_context != old_context:
-            logger.warn(
+            logger.warning(
                 "Expected previous context %r, found %r",
                 self.previous_context,
                 old_context,
@@ -159,6 +159,7 @@ class Notifier(object):
         self.room_to_user_streams = {}

         self.hs = hs
+        self.storage = hs.get_storage()
         self.event_sources = hs.get_event_sources()
         self.store = hs.get_datastore()
         self.pending_new_room_events = []

@@ -425,7 +426,10 @@ class Notifier(object):

                 if name == "room":
                     new_events = yield filter_events_for_client(
-                        self.store, user.to_string(), new_events, is_peeking=is_peeking
+                        self.storage,
+                        user.to_string(),
+                        new_events,
+                        is_peeking=is_peeking,
                     )
                 elif name == "presence":
                     now = self.clock.time_msec()
@@ -79,7 +79,7 @@ class BulkPushRuleEvaluator(object):
             dict of user_id -> push_rules
         """
         room_id = event.room_id
-        rules_for_room = self._get_rules_for_room(room_id)
+        rules_for_room = yield self._get_rules_for_room(room_id)

         rules_by_user = yield rules_for_room.get_rules(event, context)

@@ -149,9 +149,10 @@ class BulkPushRuleEvaluator(object):

         room_members = yield self.store.get_joined_users_from_context(event, context)

-        (power_levels, sender_power_level) = (
-            yield self._get_power_levels_and_sender_level(event, context)
-        )
+        (
+            power_levels,
+            sender_power_level,
+        ) = yield self._get_power_levels_and_sender_level(event, context)

         evaluator = PushRuleEvaluatorForEvent(
             event, len(room_members), sender_power_level, power_levels

@@ -234,14 +234,12 @@ class EmailPusher(object):
                 return

             self.last_stream_ordering = last_stream_ordering
-            pusher_still_exists = (
-                yield self.store.update_pusher_last_stream_ordering_and_success(
-                    self.app_id,
-                    self.email,
-                    self.user_id,
-                    last_stream_ordering,
-                    self.clock.time_msec(),
-                )
-            )
+            pusher_still_exists = yield self.store.update_pusher_last_stream_ordering_and_success(
+                self.app_id,
+                self.email,
+                self.user_id,
+                last_stream_ordering,
+                self.clock.time_msec(),
+            )
             if not pusher_still_exists:
                 # The pusher has been deleted while we were processing, so

@@ -64,6 +64,7 @@ class HttpPusher(object):
     def __init__(self, hs, pusherdict):
         self.hs = hs
         self.store = self.hs.get_datastore()
+        self.storage = self.hs.get_storage()
         self.clock = self.hs.get_clock()
         self.state_handler = self.hs.get_state_handler()
         self.user_id = pusherdict["user_name"]

@@ -102,7 +103,7 @@ class HttpPusher(object):
         if "url" not in self.data:
             raise PusherConfigException("'url' required in data for HTTP pusher")
         self.url = self.data["url"]
-        self.http_client = hs.get_simple_http_client()
+        self.http_client = hs.get_proxied_http_client()
         self.data_minus_url = {}
         self.data_minus_url.update(self.data)
         del self.data_minus_url["url"]

@@ -210,14 +211,12 @@ class HttpPusher(object):
                 http_push_processed_counter.inc()
                 self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
                 self.last_stream_ordering = push_action["stream_ordering"]
-                pusher_still_exists = (
-                    yield self.store.update_pusher_last_stream_ordering_and_success(
-                        self.app_id,
-                        self.pushkey,
-                        self.user_id,
-                        self.last_stream_ordering,
-                        self.clock.time_msec(),
-                    )
-                )
+                pusher_still_exists = yield self.store.update_pusher_last_stream_ordering_and_success(
+                    self.app_id,
+                    self.pushkey,
+                    self.user_id,
+                    self.last_stream_ordering,
+                    self.clock.time_msec(),
+                )
                 if not pusher_still_exists:
                     # The pusher has been deleted while we were processing, so

@@ -246,7 +245,7 @@ class HttpPusher(object):
                     # we really only give up so that if the URL gets
                     # fixed, we don't suddenly deliver a load
                     # of old notifications.
-                    logger.warn(
+                    logger.warning(
                         "Giving up on a notification to user %s, " "pushkey %s",
                         self.user_id,
                         self.pushkey,

@@ -299,7 +298,7 @@ class HttpPusher(object):
                 if pk != self.pushkey:
                     # for sanity, we only remove the pushkey if it
                     # was the one we actually sent...
-                    logger.warn(
+                    logger.warning(
                         ("Ignoring rejected pushkey %s because we" " didn't send it"),
                         pk,
                     )

@@ -329,7 +328,7 @@ class HttpPusher(object):
             return d

         ctx = yield push_tools.get_context_for_event(
-            self.store, self.state_handler, event, self.user_id
+            self.storage, self.state_handler, event, self.user_id
         )

         d = {

@@ -119,6 +119,7 @@ class Mailer(object):
         self.store = self.hs.get_datastore()
         self.macaroon_gen = self.hs.get_macaroon_generator()
         self.state_handler = self.hs.get_state_handler()
+        self.storage = hs.get_storage()
         self.app_name = app_name

         logger.info("Created Mailer for app_name %s" % app_name)

@@ -389,7 +390,7 @@ class Mailer(object):
         }

         the_events = yield filter_events_for_client(
-            self.store, user_id, results["events_before"]
+            self.storage, user_id, results["events_before"]
         )
         the_events.append(notif_event)

@@ -117,7 +117,7 @@ class PushRuleEvaluatorForEvent(object):
             pattern = UserID.from_string(user_id).localpart

             if not pattern:
-                logger.warn("event_match condition with no pattern")
+                logger.warning("event_match condition with no pattern")
                 return False

         # XXX: optimisation: cache our pattern regexps

@@ -173,7 +173,7 @@ def _glob_matches(glob, value, word_boundary=False):
         regex_cache[(glob, word_boundary)] = r
         return r.search(value)
     except re.error:
-        logger.warn("Failed to parse glob to regex: %r", glob)
+        logger.warning("Failed to parse glob to regex: %r", glob)
         return False

@@ -16,6 +16,7 @@
 from twisted.internet import defer

 from synapse.push.presentable_names import calculate_room_name, name_from_member_event
+from synapse.storage import Storage


 @defer.inlineCallbacks

@@ -43,22 +44,22 @@ def get_badge_count(store, user_id):


 @defer.inlineCallbacks
-def get_context_for_event(store, state_handler, ev, user_id):
+def get_context_for_event(storage: Storage, state_handler, ev, user_id):
     ctx = {}

-    room_state_ids = yield store.get_state_ids_for_event(ev.event_id)
+    room_state_ids = yield storage.state.get_state_ids_for_event(ev.event_id)

     # we no longer bother setting room_alias, and make room_name the
     # human-readable name instead, be that m.room.name, an alias or
     # a list of people in the room
     name = yield calculate_room_name(
-        store, room_state_ids, user_id, fallback_to_single_member=False
+        storage.main, room_state_ids, user_id, fallback_to_single_member=False
     )
     if name:
         ctx["name"] = name

     sender_state_event_id = room_state_ids[("m.room.member", ev.sender)]
-    sender_state_event = yield store.get_event(sender_state_event_id)
+    sender_state_event = yield storage.main.get_event(sender_state_event_id)
     ctx["sender_display_name"] = name_from_member_event(sender_state_event)

     return ctx

@@ -103,9 +103,7 @@ class PusherPool:
         # create the pusher setting last_stream_ordering to the current maximum
         # stream ordering in event_push_actions, so it will process
         # pushes from this point onwards.
-        last_stream_ordering = (
-            yield self.store.get_latest_push_action_stream_ordering()
-        )
+        last_stream_ordering = yield self.store.get_latest_push_action_stream_ordering()

         yield self.store.add_pusher(
             user_id=user_id,
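The push_tools hunk makes the umbrella Storage object's shape visible: .main is the ordinary datastore and .state the state-group store. A hedged sketch of a consumer written against that split, using only calls that appear in the diff (the displayname key is standard m.room.member content):

from twisted.internet import defer

@defer.inlineCallbacks
def sender_display_name_sketch(storage, event):
    # State questions go to storage.state; row lookups go to storage.main.
    room_state_ids = yield storage.state.get_state_ids_for_event(event.event_id)
    member_event_id = room_state_ids[("m.room.member", event.sender)]
    member_event = yield storage.main.get_event(member_event_id)
    return member_event.content.get("displayname")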
@@ -61,7 +61,6 @@ REQUIREMENTS = [
     "bcrypt>=3.1.0",
     "pillow>=4.3.0",
     "sortedcontainers>=1.4.4",
-    "psutil>=2.0.0",
     "pymacaroons>=0.13.0",
     "msgpack>=0.5.2",
     "phonenumbers>=8.2.0",
@@ -110,14 +110,14 @@ class ReplicationEndpoint(object):
         return {}

     @abc.abstractmethod
-    def _handle_request(self, request, **kwargs):
+    async def _handle_request(self, request, **kwargs):
         """Handle incoming request.

         This is called with the request object and PATH_ARGS.

         Returns:
-            Deferred[dict]: A JSON serialisable dict to be used as response
-            body of request.
+            tuple[int, dict]: HTTP status code and a JSON serialisable dict
+            to be used as response body of request.
         """
         pass

@@ -180,7 +180,7 @@ class ReplicationEndpoint(object):
                     if e.code != 504 or not cls.RETRY_ON_TIMEOUT:
                         raise

-                    logger.warn("%s request timed out", cls.NAME)
+                    logger.warning("%s request timed out", cls.NAME)

                     # If we timed out we probably don't need to worry about backing
                     # off too much, but lets just wait a little anyway.
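Under the new contract, _handle_request is a native coroutine returning an explicit (status, body) pair rather than a Deferred[dict]. A minimal subclass sketch; the endpoint name and payload are invented for illustration:

from synapse.replication.http._base import ReplicationEndpoint

class ExamplePingServlet(ReplicationEndpoint):
    # NAME and PATH_ARGS are required by ReplicationEndpoint; values invented.
    NAME = "ping"
    PATH_ARGS = ()

    @staticmethod
    def _serialize_payload():
        return {}

    async def _handle_request(self, request):
        # Return the HTTP status code and a JSON-serialisable response body.
        return 200, {"pong": True}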
@@ -82,8 +82,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
 
         return payload
 
-    @defer.inlineCallbacks
-    def _handle_request(self, request):
+    async def _handle_request(self, request):
         with Measure(self.clock, "repl_fed_send_events_parse"):
             content = parse_json_object_from_request(request)
 
@@ -101,15 +100,13 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
             EventType = event_type_from_format_version(format_ver)
             event = EventType(event_dict, internal_metadata, rejected_reason)
 
-            context = yield EventContext.deserialize(
-                self.store, event_payload["context"]
-            )
+            context = EventContext.deserialize(self.store, event_payload["context"])
 
             event_and_contexts.append((event, context))
 
         logger.info("Got %d events from federation", len(event_and_contexts))
 
-        yield self.federation_handler.persist_events_and_notify(
+        await self.federation_handler.persist_events_and_notify(
             event_and_contexts, backfilled
         )
 
@@ -144,8 +141,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
     def _serialize_payload(edu_type, origin, content):
         return {"origin": origin, "content": content}
 
-    @defer.inlineCallbacks
-    def _handle_request(self, request, edu_type):
+    async def _handle_request(self, request, edu_type):
         with Measure(self.clock, "repl_fed_send_edu_parse"):
             content = parse_json_object_from_request(request)
 
@@ -154,7 +150,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
 
         logger.info("Got %r edu from %s", edu_type, origin)
 
-        result = yield self.registry.on_edu(edu_type, origin, edu_content)
+        result = await self.registry.on_edu(edu_type, origin, edu_content)
 
         return 200, result
 
@@ -193,8 +189,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):
         """
         return {"args": args}
 
-    @defer.inlineCallbacks
-    def _handle_request(self, request, query_type):
+    async def _handle_request(self, request, query_type):
         with Measure(self.clock, "repl_fed_query_parse"):
             content = parse_json_object_from_request(request)
 
@@ -202,7 +197,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):
 
         logger.info("Got %r query", query_type)
 
-        result = yield self.registry.on_query(query_type, args)
+        result = await self.registry.on_query(query_type, args)
 
         return 200, result
 
@@ -234,9 +229,8 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
         """
         return {}
 
-    @defer.inlineCallbacks
-    def _handle_request(self, request, room_id):
-        yield self.store.clean_room_for_join(room_id)
+    async def _handle_request(self, request, room_id):
+        await self.store.clean_room_for_join(room_id)
 
         return 200, {}

@@ -15,8 +15,6 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint
 
@@ -52,15 +50,14 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
             "is_guest": is_guest,
         }
 
-    @defer.inlineCallbacks
-    def _handle_request(self, request, user_id):
+    async def _handle_request(self, request, user_id):
         content = parse_json_object_from_request(request)
 
         device_id = content["device_id"]
         initial_display_name = content["initial_display_name"]
         is_guest = content["is_guest"]
 
-        device_id, access_token = yield self.registration_handler.register_device(
+        device_id, access_token = await self.registration_handler.register_device(
             user_id, device_id, initial_display_name, is_guest
         )

@@ -15,8 +15,6 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint
 from synapse.types import Requester, UserID
@@ -65,8 +63,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
             "content": content,
         }
 
-    @defer.inlineCallbacks
-    def _handle_request(self, request, room_id, user_id):
+    async def _handle_request(self, request, room_id, user_id):
         content = parse_json_object_from_request(request)
 
         remote_room_hosts = content["remote_room_hosts"]
@@ -79,7 +76,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
 
         logger.info("remote_join: %s into room: %s", user_id, room_id)
 
-        yield self.federation_handler.do_invite_join(
+        await self.federation_handler.do_invite_join(
             remote_room_hosts, room_id, user_id, event_content
         )
 
@@ -123,8 +120,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
             "remote_room_hosts": remote_room_hosts,
         }
 
-    @defer.inlineCallbacks
-    def _handle_request(self, request, room_id, user_id):
+    async def _handle_request(self, request, room_id, user_id):
         content = parse_json_object_from_request(request)
 
         remote_room_hosts = content["remote_room_hosts"]
@@ -137,7 +133,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
         logger.info("remote_reject_invite: %s out of room: %s", user_id, room_id)
 
         try:
-            event = yield self.federation_handler.do_remotely_reject_invite(
+            event = await self.federation_handler.do_remotely_reject_invite(
                 remote_room_hosts, room_id, user_id
             )
             ret = event.get_pdu_json()
@@ -148,9 +144,9 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
             # The 'except' clause is very broad, but we need to
             # capture everything from DNS failures upwards
             #
-            logger.warn("Failed to reject invite: %s", e)
+            logger.warning("Failed to reject invite: %s", e)
 
-            yield self.store.locally_reject_invite(user_id, room_id)
+            await self.store.locally_reject_invite(user_id, room_id)
             ret = {}
 
         return 200, ret

@@ -15,8 +15,6 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint
 
@@ -74,11 +72,10 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
             "address": address,
         }
 
-    @defer.inlineCallbacks
-    def _handle_request(self, request, user_id):
+    async def _handle_request(self, request, user_id):
         content = parse_json_object_from_request(request)
 
-        yield self.registration_handler.register_with_store(
+        await self.registration_handler.register_with_store(
             user_id=user_id,
             password_hash=content["password_hash"],
             was_guest=content["was_guest"],
@@ -117,14 +114,13 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
         """
         return {"auth_result": auth_result, "access_token": access_token}
 
-    @defer.inlineCallbacks
-    def _handle_request(self, request, user_id):
+    async def _handle_request(self, request, user_id):
         content = parse_json_object_from_request(request)
 
         auth_result = content["auth_result"]
         access_token = content["access_token"]
 
-        yield self.registration_handler.post_registration_actions(
+        await self.registration_handler.post_registration_actions(
             user_id=user_id, auth_result=auth_result, access_token=access_token
         )

@@ -87,8 +87,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
 
         return payload
 
-    @defer.inlineCallbacks
-    def _handle_request(self, request, event_id):
+    async def _handle_request(self, request, event_id):
         with Measure(self.clock, "repl_send_event_parse"):
             content = parse_json_object_from_request(request)
 
@@ -101,7 +100,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
             event = EventType(event_dict, internal_metadata, rejected_reason)
 
             requester = Requester.deserialize(self.store, content["requester"])
-            context = yield EventContext.deserialize(self.store, content["context"])
+            context = EventContext.deserialize(self.store, content["context"])
 
             ratelimit = content["ratelimit"]
             extra_users = [UserID.from_string(u) for u in content["extra_users"]]
@@ -113,7 +112,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
             "Got event to send with ID: %s into room: %s", event.event_id, event.room_id
         )
 
-        yield self.event_creation_handler.persist_and_notify_client_event(
+        await self.event_creation_handler.persist_and_notify_client_event(
             requester, event, context, ratelimit=ratelimit, extra_users=extra_users
         )

@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from typing import Dict
 
 import six
 
@@ -44,7 +45,14 @@ class BaseSlavedStore(SQLBaseStore):
 
         self.hs = hs
 
-    def stream_positions(self):
+    def stream_positions(self) -> Dict[str, int]:
+        """
+        Get the current positions of all the streams this store wants to subscribe to
+
+        Returns:
+            map from stream name to the most recent update we have for
+            that stream (ie, the point we want to start replicating from)
+        """
         pos = {}
         if self._cache_id_gen:
             pos["caches"] = self._cache_id_gen.get_current_token()

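[Editor's note: `stream_positions` is now annotated as returning `Dict[str, int]`, and subclasses extend the base map with their own stream tokens, as the devices store does in the next hunk. A rough sketch of the override pattern, with made-up stream names and token values:]

from typing import Dict


class BaseSlavedStoreSketch:
    """Stand-in base: reports the streams this store replicates."""

    def stream_positions(self) -> Dict[str, int]:
        # Map from stream name to the most recent update we have,
        # i.e. the point to start replicating from.
        return {"caches": 1234}


class SlavedDeviceStoreSketch(BaseSlavedStoreSketch):
    def stream_positions(self) -> Dict[str, int]:
        result = super().stream_positions()
        # One ID generator can back several streams; both get the same token.
        current_token = 5678  # illustrative token
        result["device_lists"] = current_token
        result["user_signature"] = current_token
        return result


print(SlavedDeviceStoreSketch().stream_positions())
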
@@ -15,6 +15,7 @@
 
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
+from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
 from synapse.storage.data_stores.main.devices import DeviceWorkerStore
 from synapse.storage.data_stores.main.end_to_end_keys import EndToEndKeyWorkerStore
 from synapse.util.caches.stream_change_cache import StreamChangeCache
@@ -42,14 +43,22 @@ class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedSto
 
     def stream_positions(self):
         result = super(SlavedDeviceStore, self).stream_positions()
-        result["device_lists"] = self._device_list_id_gen.get_current_token()
+        # The user signature stream uses the same stream ID generator as the
+        # device list stream, so set them both to the device list ID
+        # generator's current token.
+        current_token = self._device_list_id_gen.get_current_token()
+        result[DeviceListsStream.NAME] = current_token
+        result[UserSignatureStream.NAME] = current_token
         return result
 
     def process_replication_rows(self, stream_name, token, rows):
-        if stream_name == "device_lists":
+        if stream_name == DeviceListsStream.NAME:
             self._device_list_id_gen.advance(token)
             for row in rows:
                 self._invalidate_caches_for_devices(token, row.user_id, row.destination)
+        elif stream_name == UserSignatureStream.NAME:
+            for row in rows:
+                self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
         return super(SlavedDeviceStore, self).process_replication_rows(
             stream_name, token, rows
         )

@@ -16,10 +16,17 @@
 """
 
 import logging
+from typing import Dict
 
 from twisted.internet import defer
 from twisted.internet.protocol import ReconnectingClientFactory
 
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.tcp.protocol import (
+    AbstractReplicationClientHandler,
+    ClientReplicationStreamProtocol,
+)
+
 from .commands import (
     FederationAckCommand,
     InvalidateCacheCommand,
@@ -27,7 +34,6 @@ from .commands import (
     UserIpCommand,
     UserSyncCommand,
 )
-from .protocol import ClientReplicationStreamProtocol
 
 logger = logging.getLogger(__name__)
 
@@ -42,7 +48,7 @@ class ReplicationClientFactory(ReconnectingClientFactory):
 
     maxDelay = 30  # Try at least once every N seconds
 
-    def __init__(self, hs, client_name, handler):
+    def __init__(self, hs, client_name, handler: AbstractReplicationClientHandler):
         self.client_name = client_name
         self.handler = handler
         self.server_name = hs.config.server_name
@@ -68,13 +74,13 @@ class ReplicationClientFactory(ReconnectingClientFactory):
         ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
 
 
-class ReplicationClientHandler(object):
+class ReplicationClientHandler(AbstractReplicationClientHandler):
     """A base handler that can be passed to the ReplicationClientFactory.
 
     By default proxies incoming replication data to the SlaveStore.
     """
 
-    def __init__(self, store):
+    def __init__(self, store: BaseSlavedStore):
         self.store = store
 
         # The current connection. None if we are currently (re)connecting
@@ -138,11 +144,13 @@ class ReplicationClientHandler(object):
         if d:
             d.callback(data)
 
-    def get_streams_to_replicate(self):
+    def get_streams_to_replicate(self) -> Dict[str, int]:
         """Called when a new connection has been established and we need to
         subscribe to streams.
 
-        Returns a dictionary of stream name to token.
+        Returns:
+            map from stream name to the most recent update we have for
+            that stream (ie, the point we want to start replicating from)
         """
         args = self.store.stream_positions()
         user_account_data = args.pop("user_account_data", None)
@@ -168,7 +176,7 @@ class ReplicationClientHandler(object):
         if self.connection:
             self.connection.send_command(cmd)
         else:
-            logger.warn("Queuing command as not connected: %r", cmd.NAME)
+            logger.warning("Queuing command as not connected: %r", cmd.NAME)
             self.pending_commands.append(cmd)
 
     def send_federation_ack(self, token):

@@ -48,7 +48,7 @@ indicate which side is sending, these are *not* included on the wire::
     > ERROR server stopping
     * connection closed by server *
 """
-
+import abc
 import fcntl
 import logging
 import struct
@@ -65,6 +65,7 @@ from twisted.python.failure import Failure
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import Clock
 from synapse.util.stringutils import random_string
 
 from .commands import (
@@ -249,7 +250,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
         return handler(cmd)
 
     def close(self):
-        logger.warn("[%s] Closing connection", self.id())
+        logger.warning("[%s] Closing connection", self.id())
         self.time_we_closed = self.clock.time_msec()
         self.transport.loseConnection()
         self.on_connection_closed()
@@ -558,11 +559,80 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
         self.streamer.lost_connection(self)
 
 
+class AbstractReplicationClientHandler(metaclass=abc.ABCMeta):
+    """
+    The interface for the handler that should be passed to
+    ClientReplicationStreamProtocol
+    """
+
+    @abc.abstractmethod
+    def on_rdata(self, stream_name, token, rows):
+        """Called to handle a batch of replication data with a given stream token.
+
+        Args:
+            stream_name (str): name of the replication stream for this batch of rows
+            token (int): stream token for this batch of rows
+            rows (list): a list of Stream.ROW_TYPE objects as returned by
+                Stream.parse_row.
+
+        Returns:
+            Deferred|None
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def on_position(self, stream_name, token):
+        """Called when we get new position data."""
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def on_sync(self, data):
+        """Called when get a new SYNC command."""
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def get_streams_to_replicate(self):
+        """Called when a new connection has been established and we need to
+        subscribe to streams.
+
+        Returns:
+            map from stream name to the most recent update we have for
+            that stream (ie, the point we want to start replicating from)
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def get_currently_syncing_users(self):
+        """Get the list of currently syncing users (if any). This is called
+        when a connection has been established and we need to send the
+        currently syncing users."""
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def update_connection(self, connection):
+        """Called when a connection has been established (or lost with None).
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def finished_connecting(self):
+        """Called when we have successfully subscribed and caught up to all
+        streams we're interested in.
+        """
+        raise NotImplementedError()
+
+
 class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
     VALID_INBOUND_COMMANDS = VALID_SERVER_COMMANDS
     VALID_OUTBOUND_COMMANDS = VALID_CLIENT_COMMANDS
 
-    def __init__(self, client_name, server_name, clock, handler):
+    def __init__(
+        self,
+        client_name: str,
+        server_name: str,
+        clock: Clock,
+        handler: AbstractReplicationClientHandler,
+    ):
         BaseReplicationStreamProtocol.__init__(self, clock)
 
         self.client_name = client_name

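[Editor's note: the new `AbstractReplicationClientHandler` pins down the interface that `ClientReplicationStreamProtocol` expects from its handler. A toy concrete handler satisfying a trimmed copy of that interface (the method bodies here are illustrative placeholders):]

import abc


class AbstractReplicationClientHandler(metaclass=abc.ABCMeta):
    # Trimmed copy of the interface added in the hunk above.
    @abc.abstractmethod
    def on_rdata(self, stream_name, token, rows): ...

    @abc.abstractmethod
    def on_position(self, stream_name, token): ...

    @abc.abstractmethod
    def on_sync(self, data): ...

    @abc.abstractmethod
    def get_streams_to_replicate(self): ...

    @abc.abstractmethod
    def get_currently_syncing_users(self): ...

    @abc.abstractmethod
    def update_connection(self, connection): ...

    @abc.abstractmethod
    def finished_connecting(self): ...


class LoggingReplicationHandler(AbstractReplicationClientHandler):
    """Toy handler that just records what it is told."""

    def on_rdata(self, stream_name, token, rows):
        print("rdata", stream_name, token, rows)

    def on_position(self, stream_name, token):
        print("position", stream_name, token)

    def on_sync(self, data):
        print("sync", data)

    def get_streams_to_replicate(self):
        return {"events": 0}  # start from the beginning of each stream

    def get_currently_syncing_users(self):
        return []

    def update_connection(self, connection):
        self.connection = connection

    def finished_connecting(self):
        print("caught up")


handler = LoggingReplicationHandler()
handler.on_rdata("events", 42, [])
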
@@ -45,5 +45,6 @@ STREAMS_MAP = {
         _base.TagAccountDataStream,
         _base.AccountDataStream,
         _base.GroupServerStream,
+        _base.UserSignatureStream,
     )
 }

@@ -95,6 +95,7 @@ GroupsStreamRow = namedtuple(
     "GroupsStreamRow",
     ("group_id", "user_id", "type", "content"),  # str  # str  # str  # dict
 )
+UserSignatureStreamRow = namedtuple("UserSignatureStreamRow", ("user_id"))  # str
 
 
 class Stream(object):
@@ -438,3 +439,20 @@ class GroupServerStream(Stream):
         self.update_function = store.get_all_groups_changes
 
         super(GroupServerStream, self).__init__(hs)
+
+
+class UserSignatureStream(Stream):
+    """A user has signed their own device with their user-signing key
+    """
+
+    NAME = "user_signature"
+    _LIMITED = False
+    ROW_TYPE = UserSignatureStreamRow
+
+    def __init__(self, hs):
+        store = hs.get_datastore()
+
+        self.current_token = store.get_device_stream_token
+        self.update_function = store.get_all_user_signature_changes_for_remotes
+
+        super(UserSignatureStream, self).__init__(hs)

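[Editor's note: `namedtuple("UserSignatureStreamRow", ("user_id"))` passes a plain string, not a one-element tuple, because `("user_id")` has no trailing comma; it still works because `namedtuple` also accepts field names as a whitespace/comma-separated string. A quick demonstration:]

from collections import namedtuple

# ("user_id") is just the string "user_id" -- parentheses don't make a
# tuple without a trailing comma. namedtuple also accepts a string of
# space/comma-separated field names, so both spellings produce the same type.
RowFromString = namedtuple("UserSignatureStreamRow", ("user_id"))
RowFromTuple = namedtuple("UserSignatureStreamRow", ("user_id",))

assert RowFromString._fields == RowFromTuple._fields == ("user_id",)
print(RowFromString("@alice:example.org").user_id)
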
@@ -286,7 +286,7 @@ class PurgeHistoryRestServlet(RestServlet):
                 room_id, stream_ordering
             )
             if not r:
-                logger.warn(
+                logger.warning(
                     "[purge] purging events not possible: No event found "
                     "(received_ts %i => stream_ordering %i)",
                     ts,

@@ -203,10 +203,11 @@ class LoginRestServlet(RestServlet):
             address = address.lower()
 
         # Check for login providers that support 3pid login types
-        canonical_user_id, callback_3pid = (
-            yield self.auth_handler.check_password_provider_3pid(
-                medium, address, login_submission["password"]
-            )
+        (
+            canonical_user_id,
+            callback_3pid,
+        ) = yield self.auth_handler.check_password_provider_3pid(
+            medium, address, login_submission["password"]
         )
         if canonical_user_id:
             # Authentication through password provider and 3pid succeeded
@@ -221,7 +222,7 @@ class LoginRestServlet(RestServlet):
                 medium, address
             )
             if not user_id:
-                logger.warn(
+                logger.warning(
                     "unknown 3pid identifier medium %s, address %r", medium, address
                 )
                 raise LoginError(403, "", errcode=Codes.FORBIDDEN)
@@ -280,8 +281,8 @@ class LoginRestServlet(RestServlet):
     def do_token_login(self, login_submission):
         token = login_submission["token"]
         auth_handler = self.auth_handler
-        user_id = (
-            yield auth_handler.validate_short_term_login_token_and_get_user_id(token)
+        user_id = yield auth_handler.validate_short_term_login_token_and_get_user_id(
+            token
         )
 
         result = yield self._register_device_with_callback(user_id, login_submission)
@@ -380,7 +381,7 @@ class CasTicketServlet(RestServlet):
         self.cas_displayname_attribute = hs.config.cas_displayname_attribute
         self.cas_required_attributes = hs.config.cas_required_attributes
         self._sso_auth_handler = SSOAuthHandler(hs)
-        self._http_client = hs.get_simple_http_client()
+        self._http_client = hs.get_proxied_http_client()
 
     @defer.inlineCallbacks
     def on_GET(self, request):

@@ -21,8 +21,6 @@ from six.moves.urllib import parse as urlparse
 
 from canonicaljson import json
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import (
     AuthError,
@@ -85,11 +83,10 @@ class RoomCreateRestServlet(TransactionRestServlet):
         set_tag("txn_id", txn_id)
         return self.txns.fetch_or_execute_request(request, self.on_POST, request)
 
-    @defer.inlineCallbacks
-    def on_POST(self, request):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_POST(self, request):
+        requester = await self.auth.get_user_by_req(request)
 
-        info = yield self._room_creation_handler.create_room(
+        info = await self._room_creation_handler.create_room(
             requester, self.get_room_config(request)
         )
 
@@ -154,15 +151,14 @@ class RoomStateEventRestServlet(TransactionRestServlet):
     def on_PUT_no_state_key(self, request, room_id, event_type):
         return self.on_PUT(request, room_id, event_type, "")
 
-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id, event_type, state_key):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request, room_id, event_type, state_key):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         format = parse_string(
             request, "format", default="content", allowed_values=["content", "event"]
         )
 
         msg_handler = self.message_handler
-        data = yield msg_handler.get_room_data(
+        data = await msg_handler.get_room_data(
             user_id=requester.user.to_string(),
             room_id=room_id,
             event_type=event_type,
@@ -179,9 +175,8 @@ class RoomStateEventRestServlet(TransactionRestServlet):
         elif format == "content":
             return 200, data.get_dict()["content"]
 
-    @defer.inlineCallbacks
-    def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
+        requester = await self.auth.get_user_by_req(request)
 
         if txn_id:
             set_tag("txn_id", txn_id)
@@ -200,7 +195,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):
 
         if event_type == EventTypes.Member:
             membership = content.get("membership", None)
-            event = yield self.room_member_handler.update_membership(
+            event = await self.room_member_handler.update_membership(
                 requester,
                 target=UserID.from_string(state_key),
                 room_id=room_id,
@@ -208,7 +203,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):
                 content=content,
             )
         else:
-            event = yield self.event_creation_handler.create_and_send_nonmember_event(
+            event = await self.event_creation_handler.create_and_send_nonmember_event(
                 requester, event_dict, txn_id=txn_id
             )
@@ -231,9 +226,8 @@ class RoomSendEventRestServlet(TransactionRestServlet):
         PATTERNS = "/rooms/(?P<room_id>[^/]*)/send/(?P<event_type>[^/]*)"
         register_txn_path(self, PATTERNS, http_server, with_get=True)
 
-    @defer.inlineCallbacks
-    def on_POST(self, request, room_id, event_type, txn_id=None):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_POST(self, request, room_id, event_type, txn_id=None):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         content = parse_json_object_from_request(request)
 
         event_dict = {
@@ -246,7 +240,7 @@ class RoomSendEventRestServlet(TransactionRestServlet):
         if b"ts" in request.args and requester.app_service:
             event_dict["origin_server_ts"] = parse_integer(request, "ts", 0)
 
-        event = yield self.event_creation_handler.create_and_send_nonmember_event(
+        event = await self.event_creation_handler.create_and_send_nonmember_event(
             requester, event_dict, txn_id=txn_id
         )
 
@@ -276,9 +270,8 @@ class JoinRoomAliasServlet(TransactionRestServlet):
         PATTERNS = "/join/(?P<room_identifier>[^/]*)"
         register_txn_path(self, PATTERNS, http_server)
 
-    @defer.inlineCallbacks
-    def on_POST(self, request, room_identifier, txn_id=None):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_POST(self, request, room_identifier, txn_id=None):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
 
         try:
             content = parse_json_object_from_request(request)
@@ -298,14 +291,14 @@ class JoinRoomAliasServlet(TransactionRestServlet):
         elif RoomAlias.is_valid(room_identifier):
             handler = self.room_member_handler
             room_alias = RoomAlias.from_string(room_identifier)
-            room_id, remote_room_hosts = yield handler.lookup_room_alias(room_alias)
+            room_id, remote_room_hosts = await handler.lookup_room_alias(room_alias)
             room_id = room_id.to_string()
         else:
             raise SynapseError(
                 400, "%s was not legal room ID or room alias" % (room_identifier,)
             )
 
-        yield self.room_member_handler.update_membership(
+        await self.room_member_handler.update_membership(
             requester=requester,
             target=requester.user,
             room_id=room_id,
@@ -335,12 +328,11 @@ class PublicRoomListRestServlet(TransactionRestServlet):
         self.hs = hs
         self.auth = hs.get_auth()
 
-    @defer.inlineCallbacks
-    def on_GET(self, request):
+    async def on_GET(self, request):
         server = parse_string(request, "server", default=None)
 
         try:
-            yield self.auth.get_user_by_req(request, allow_guest=True)
+            await self.auth.get_user_by_req(request, allow_guest=True)
         except InvalidClientCredentialsError as e:
             # Option to allow servers to require auth when accessing
             # /publicRooms via CS API. This is especially helpful in private
@@ -367,19 +359,18 @@ class PublicRoomListRestServlet(TransactionRestServlet):
 
         handler = self.hs.get_room_list_handler()
         if server:
-            data = yield handler.get_remote_public_room_list(
+            data = await handler.get_remote_public_room_list(
                 server, limit=limit, since_token=since_token
             )
         else:
-            data = yield handler.get_local_public_room_list(
+            data = await handler.get_local_public_room_list(
                 limit=limit, since_token=since_token
             )
 
         return 200, data
 
-    @defer.inlineCallbacks
-    def on_POST(self, request):
-        yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_POST(self, request):
+        await self.auth.get_user_by_req(request, allow_guest=True)
 
         server = parse_string(request, "server", default=None)
         content = parse_json_object_from_request(request)
@@ -408,7 +399,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
 
         handler = self.hs.get_room_list_handler()
         if server:
-            data = yield handler.get_remote_public_room_list(
+            data = await handler.get_remote_public_room_list(
                 server,
                 limit=limit,
                 since_token=since_token,
@@ -417,7 +408,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
                 third_party_instance_id=third_party_instance_id,
             )
         else:
-            data = yield handler.get_local_public_room_list(
+            data = await handler.get_local_public_room_list(
                 limit=limit,
                 since_token=since_token,
                 search_filter=search_filter,
@@ -436,10 +427,9 @@ class RoomMemberListRestServlet(RestServlet):
         self.message_handler = hs.get_message_handler()
         self.auth = hs.get_auth()
 
-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id):
+    async def on_GET(self, request, room_id):
         # TODO support Pagination stream API (limit/tokens)
-        requester = yield self.auth.get_user_by_req(request)
+        requester = await self.auth.get_user_by_req(request)
         handler = self.message_handler
 
         # request the state as of a given event, as identified by a stream token,
@@ -459,7 +449,7 @@ class RoomMemberListRestServlet(RestServlet):
         membership = parse_string(request, "membership")
         not_membership = parse_string(request, "not_membership")
 
-        events = yield handler.get_state_events(
+        events = await handler.get_state_events(
             room_id=room_id,
             user_id=requester.user.to_string(),
             at_token=at_token,
@@ -488,11 +478,10 @@ class JoinedRoomMemberListRestServlet(RestServlet):
         self.message_handler = hs.get_message_handler()
         self.auth = hs.get_auth()
 
-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_GET(self, request, room_id):
+        requester = await self.auth.get_user_by_req(request)
 
-        users_with_profile = yield self.message_handler.get_joined_members(
+        users_with_profile = await self.message_handler.get_joined_members(
             requester, room_id
         )
 
@@ -508,9 +497,8 @@ class RoomMessageListRestServlet(RestServlet):
         self.pagination_handler = hs.get_pagination_handler()
         self.auth = hs.get_auth()
 
-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request, room_id):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         pagination_config = PaginationConfig.from_request(request, default_limit=10)
         as_client_event = b"raw" not in request.args
         filter_bytes = parse_string(request, b"filter", encoding=None)
@@ -521,7 +509,7 @@ class RoomMessageListRestServlet(RestServlet):
             as_client_event = False
         else:
             event_filter = None
-        msgs = yield self.pagination_handler.get_messages(
+        msgs = await self.pagination_handler.get_messages(
             room_id=room_id,
             requester=requester,
             pagin_config=pagination_config,
@@ -541,11 +529,10 @@ class RoomStateRestServlet(RestServlet):
         self.message_handler = hs.get_message_handler()
         self.auth = hs.get_auth()
 
-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request, room_id):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         # Get all the current state for this room
-        events = yield self.message_handler.get_state_events(
+        events = await self.message_handler.get_state_events(
             room_id=room_id,
             user_id=requester.user.to_string(),
             is_guest=requester.is_guest,
@@ -562,11 +549,10 @@ class RoomInitialSyncRestServlet(RestServlet):
         self.initial_sync_handler = hs.get_initial_sync_handler()
         self.auth = hs.get_auth()
 
-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request, room_id):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         pagination_config = PaginationConfig.from_request(request)
-        content = yield self.initial_sync_handler.room_initial_sync(
+        content = await self.initial_sync_handler.room_initial_sync(
             room_id=room_id, requester=requester, pagin_config=pagination_config
         )
         return 200, content
@@ -584,11 +570,10 @@ class RoomEventServlet(RestServlet):
         self._event_serializer = hs.get_event_client_serializer()
         self.auth = hs.get_auth()
 
-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id, event_id):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request, room_id, event_id):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         try:
-            event = yield self.event_handler.get_event(
+            event = await self.event_handler.get_event(
                 requester.user, room_id, event_id
             )
         except AuthError:
@@ -599,7 +584,7 @@ class RoomEventServlet(RestServlet):
 
         time_now = self.clock.time_msec()
         if event:
-            event = yield self._event_serializer.serialize_event(event, time_now)
+            event = await self._event_serializer.serialize_event(event, time_now)
             return 200, event
 
         return SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
@@ -617,9 +602,8 @@ class RoomEventContextServlet(RestServlet):
         self._event_serializer = hs.get_event_client_serializer()
         self.auth = hs.get_auth()
 
-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id, event_id):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request, room_id, event_id):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
 
         limit = parse_integer(request, "limit", default=10)
 
@@ -631,7 +615,7 @@ class RoomEventContextServlet(RestServlet):
         else:
             event_filter = None
 
-        results = yield self.room_context_handler.get_event_context(
+        results = await self.room_context_handler.get_event_context(
             requester.user, room_id, event_id, limit, event_filter
         )
 
@@ -639,16 +623,16 @@ class RoomEventContextServlet(RestServlet):
             raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
 
         time_now = self.clock.time_msec()
-        results["events_before"] = yield self._event_serializer.serialize_events(
+        results["events_before"] = await self._event_serializer.serialize_events(
            results["events_before"], time_now
        )
-        results["event"] = yield self._event_serializer.serialize_event(
+        results["event"] = await self._event_serializer.serialize_event(
             results["event"], time_now
         )
-        results["events_after"] = yield self._event_serializer.serialize_events(
+        results["events_after"] = await self._event_serializer.serialize_events(
             results["events_after"], time_now
         )
-        results["state"] = yield self._event_serializer.serialize_events(
+        results["state"] = await self._event_serializer.serialize_events(
             results["state"], time_now
         )
 
@@ -665,11 +649,10 @@ class RoomForgetRestServlet(TransactionRestServlet):
         PATTERNS = "/rooms/(?P<room_id>[^/]*)/forget"
         register_txn_path(self, PATTERNS, http_server)
 
-    @defer.inlineCallbacks
-    def on_POST(self, request, room_id, txn_id=None):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=False)
+    async def on_POST(self, request, room_id, txn_id=None):
+        requester = await self.auth.get_user_by_req(request, allow_guest=False)
 
-        yield self.room_member_handler.forget(user=requester.user, room_id=room_id)
+        await self.room_member_handler.forget(user=requester.user, room_id=room_id)
 
         return 200, {}
 
@@ -696,9 +679,8 @@ class RoomMembershipRestServlet(TransactionRestServlet):
         )
         register_txn_path(self, PATTERNS, http_server)
 
-    @defer.inlineCallbacks
-    def on_POST(self, request, room_id, membership_action, txn_id=None):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_POST(self, request, room_id, membership_action, txn_id=None):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
 
         if requester.is_guest and membership_action not in {
             Membership.JOIN,
@@ -714,7 +696,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
             content = {}
 
         if membership_action == "invite" and self._has_3pid_invite_keys(content):
-            yield self.room_member_handler.do_3pid_invite(
+            await self.room_member_handler.do_3pid_invite(
                 room_id,
                 requester.user,
                 content["medium"],
@@ -735,7 +717,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
         if "reason" in content and membership_action in ["kick", "ban"]:
             event_content = {"reason": content["reason"]}
 
-        yield self.room_member_handler.update_membership(
+        await self.room_member_handler.update_membership(
             requester=requester,
             target=target,
             room_id=room_id,
@@ -777,12 +759,11 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
         PATTERNS = "/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)"
         register_txn_path(self, PATTERNS, http_server)
 
-    @defer.inlineCallbacks
-    def on_POST(self, request, room_id, event_id, txn_id=None):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_POST(self, request, room_id, event_id, txn_id=None):
+        requester = await self.auth.get_user_by_req(request)
         content = parse_json_object_from_request(request)
 
-        event = yield self.event_creation_handler.create_and_send_nonmember_event(
+        event = await self.event_creation_handler.create_and_send_nonmember_event(
             requester,
             {
                 "type": EventTypes.Redaction,
@@ -816,29 +797,28 @@ class RoomTypingRestServlet(RestServlet):
         self.typing_handler = hs.get_typing_handler()
         self.auth = hs.get_auth()
 
-    @defer.inlineCallbacks
-    def on_PUT(self, request, room_id, user_id):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_PUT(self, request, room_id, user_id):
+        requester = await self.auth.get_user_by_req(request)
 
         room_id = urlparse.unquote(room_id)
         target_user = UserID.from_string(urlparse.unquote(user_id))
 
         content = parse_json_object_from_request(request)
 
-        yield self.presence_handler.bump_presence_active_time(requester.user)
+        await self.presence_handler.bump_presence_active_time(requester.user)
 
         # Limit timeout to stop people from setting silly typing timeouts.
         timeout = min(content.get("timeout", 30000), 120000)
 
         if content["typing"]:
-            yield self.typing_handler.started_typing(
+            await self.typing_handler.started_typing(
                 target_user=target_user,
                 auth_user=requester.user,
                 room_id=room_id,
                 timeout=timeout,
             )
         else:
-            yield self.typing_handler.stopped_typing(
+            await self.typing_handler.stopped_typing(
                 target_user=target_user, auth_user=requester.user, room_id=room_id
             )
 
@@ -853,14 +833,13 @@ class SearchRestServlet(RestServlet):
         self.handlers = hs.get_handlers()
         self.auth = hs.get_auth()
 
-    @defer.inlineCallbacks
-    def on_POST(self, request):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_POST(self, request):
+        requester = await self.auth.get_user_by_req(request)
 
         content = parse_json_object_from_request(request)
 
         batch = parse_string(request, "next_batch")
-        results = yield self.handlers.search_handler.search(
+        results = await self.handlers.search_handler.search(
             requester.user, content, batch
         )
 
@@ -875,11 +854,10 @@ class JoinedRoomsRestServlet(RestServlet):
         self.store = hs.get_datastore()
         self.auth = hs.get_auth()
 
-    @defer.inlineCallbacks
-    def on_GET(self, request):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+    async def on_GET(self, request):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
 
-        room_ids = yield self.store.get_rooms_for_user(requester.user.to_string())
+        room_ids = await self.store.get_rooms_for_user(requester.user.to_string())
         return 200, {"joined_rooms": list(room_ids)}

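[Editor's note: the room.py hunks above are a mechanical `@defer.inlineCallbacks`/`yield` to `async def`/`await` conversion; Twisted Deferreds are awaitable from native coroutines, and callers can wrap the coroutine with `defer.ensureDeferred` wherever a Deferred is expected. A self-contained before/after sketch with a stub auth object:]

from twisted.internet import defer


class StubAuth:
    def get_user_by_req(self, request):
        # Stand-in for Synapse's Auth.get_user_by_req; returns a fired Deferred.
        return defer.succeed("@alice:example.org")


@defer.inlineCallbacks
def old_style_handler(auth, request):
    # Before: generator-based; each yield unwraps a Deferred.
    requester = yield auth.get_user_by_req(request)
    return requester


async def new_style_handler(auth, request):
    # After: native coroutine; Deferreds are awaitable, and callers wrap
    # the coroutine with defer.ensureDeferred() where a Deferred is needed.
    requester = await auth.get_user_by_req(request)
    return requester


d = defer.ensureDeferred(new_style_handler(StubAuth(), request=None))
d.addCallback(print)  # prints @alice:example.org (Deferred already fired)
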
@@ -71,7 +71,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
     def on_POST(self, request):
         if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
             if self.config.local_threepid_handling_disabled_due_to_email_config:
-                logger.warn(
+                logger.warning(
                     "User password resets have been disabled due to lack of email config"
                 )
             raise SynapseError(
@@ -148,7 +148,7 @@ class PasswordResetSubmitTokenServlet(RestServlet):
         self.clock = hs.get_clock()
         self.store = hs.get_datastore()
         if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
-            self.failure_email_template, = load_jinja2_templates(
+            (self.failure_email_template,) = load_jinja2_templates(
                 self.config.email_template_dir,
                 [self.config.email_password_reset_template_failure_html],
             )
@@ -162,7 +162,7 @@ class PasswordResetSubmitTokenServlet(RestServlet):
         )
         if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
             if self.config.local_threepid_handling_disabled_due_to_email_config:
-                logger.warn(
+                logger.warning(
                     "Password reset emails have been disabled due to lack of an email config"
                 )
             raise SynapseError(
@@ -183,7 +183,7 @@ class PasswordResetSubmitTokenServlet(RestServlet):
             # Perform a 302 redirect if next_link is set
             if next_link:
                 if next_link.startswith("file:///"):
-                    logger.warn(
+                    logger.warning(
                         "Not redirecting to next_link as it is a local file: address"
                     )
                 else:
@@ -350,7 +350,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
     def on_POST(self, request):
         if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
             if self.config.local_threepid_handling_disabled_due_to_email_config:
-                logger.warn(
+                logger.warning(
                     "Adding emails have been disabled due to lack of an email config"
                 )
             raise SynapseError(
@@ -441,7 +441,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
             raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE)
 
         if not self.hs.config.account_threepid_delegate_msisdn:
-            logger.warn(
+            logger.warning(
                 "No upstream msisdn account_threepid_delegate configured on the server to "
                 "handle this request"
             )
@@ -479,7 +479,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
         self.clock = hs.get_clock()
         self.store = hs.get_datastore()
         if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
-            self.failure_email_template, = load_jinja2_templates(
+            (self.failure_email_template,) = load_jinja2_templates(
                 self.config.email_template_dir,
                 [self.config.email_add_threepid_template_failure_html],
             )
@@ -488,7 +488,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
     def on_GET(self, request):
         if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
             if self.config.local_threepid_handling_disabled_due_to_email_config:
-                logger.warn(
+                logger.warning(
                     "Adding emails have been disabled due to lack of an email config"
                 )
             raise SynapseError(
@@ -515,7 +515,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
             # Perform a 302 redirect if next_link is set
             if next_link:
                 if next_link.startswith("file:///"):
-                    logger.warn(
+                    logger.warning(
                         "Not redirecting to next_link as it is a local file: address"
                     )
                 else:

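[Editor's note: the `(self.failure_email_template,) = load_jinja2_templates(...)` changes above are black's preferred spelling of single-element unpacking; the parentheses make the easy-to-miss trailing comma visible. Both spellings are equivalent:]

# Both lines unpack a one-element sequence; the parenthesised form that
# black enforces makes the trailing comma harder to overlook.
templates = ["<html>failure</html>"]

failure_template, = templates    # old spelling
(failure_template,) = templates  # new spelling, same semantics

assert failure_template == "<html>failure</html>"
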
@@ -15,8 +15,6 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 
 from ._base import client_patterns
@@ -34,17 +32,16 @@ class ReadMarkerRestServlet(RestServlet):
         self.read_marker_handler = hs.get_read_marker_handler()
         self.presence_handler = hs.get_presence_handler()
 
-    @defer.inlineCallbacks
-    def on_POST(self, request, room_id):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_POST(self, request, room_id):
+        requester = await self.auth.get_user_by_req(request)
 
-        yield self.presence_handler.bump_presence_active_time(requester.user)
+        await self.presence_handler.bump_presence_active_time(requester.user)
 
         body = parse_json_object_from_request(request)
 
         read_event_id = body.get("m.read", None)
         if read_event_id:
-            yield self.receipts_handler.received_client_receipt(
+            await self.receipts_handler.received_client_receipt(
                 room_id,
                 "m.read",
                 user_id=requester.user.to_string(),
@@ -53,7 +50,7 @@ class ReadMarkerRestServlet(RestServlet):
 
         read_marker_event_id = body.get("m.fully_read", None)
         if read_marker_event_id:
-            yield self.read_marker_handler.received_client_read_marker(
+            await self.read_marker_handler.received_client_read_marker(
                 room_id,
                 user_id=requester.user.to_string(),
                 event_id=read_marker_event_id,

@@ -15,8 +15,6 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import RestServlet
 
@@ -39,16 +37,15 @@ class ReceiptRestServlet(RestServlet):
         self.receipts_handler = hs.get_receipts_handler()
         self.presence_handler = hs.get_presence_handler()
 
-    @defer.inlineCallbacks
-    def on_POST(self, request, room_id, receipt_type, event_id):
-        requester = yield self.auth.get_user_by_req(request)
+    async def on_POST(self, request, room_id, receipt_type, event_id):
+        requester = await self.auth.get_user_by_req(request)
 
         if receipt_type != "m.read":
             raise SynapseError(400, "Receipt type must be 'm.read'")
 
-        yield self.presence_handler.bump_presence_active_time(requester.user)
+        await self.presence_handler.bump_presence_active_time(requester.user)
 
-        yield self.receipts_handler.received_client_receipt(
+        await self.receipts_handler.received_client_receipt(
             room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id
         )

@@ -106,7 +106,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
     def on_POST(self, request):
         if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
             if self.hs.config.local_threepid_handling_disabled_due_to_email_config:
-                logger.warn(
+                logger.warning(
                     "Email registration has been disabled due to lack of email config"
                 )
             raise SynapseError(
@@ -207,7 +207,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
         )
 
         if not self.hs.config.account_threepid_delegate_msisdn:
-            logger.warn(
+            logger.warning(
                 "No upstream msisdn account_threepid_delegate configured on the server to "
                 "handle this request"
             )
@@ -247,13 +247,13 @@ class RegistrationSubmitTokenServlet(RestServlet):
         self.store = hs.get_datastore()
 
         if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
-            self.failure_email_template, = load_jinja2_templates(
+            (self.failure_email_template,) = load_jinja2_templates(
                 self.config.email_template_dir,
                 [self.config.email_registration_template_failure_html],
             )
 
         if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
-            self.failure_email_template, = load_jinja2_templates(
+            (self.failure_email_template,) = load_jinja2_templates(
                 self.config.email_template_dir,
                 [self.config.email_registration_template_failure_html],
             )
@@ -266,7 +266,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
         )
         if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
             if self.config.local_threepid_handling_disabled_due_to_email_config:
-                logger.warn(
+                logger.warning(
                     "User registration via email has been disabled due to lack of email config"
                 )
             raise SynapseError(
@@ -287,7 +287,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
             # Perform a 302 redirect if next_link is set
             if next_link:
                 if next_link.startswith("file:///"):
-                    logger.warn(
+                    logger.warning(
                         "Not redirecting to next_link as it is a local file: address"
                     )
                 else:
@@ -480,7 +480,7 @@ class RegisterRestServlet(RestServlet):
                 # a password to work around a client bug where it sent
                 # the 'initial_device_display_name' param alone, wiping out
                 # the original registration params
-                logger.warn("Ignoring initial_device_display_name without password")
+                logger.warning("Ignoring initial_device_display_name without password")
                 del body["initial_device_display_name"]
 
         session_id = self.auth_handler.get_session_id(body)

@@ -112,9 +112,14 @@ class SyncRestServlet(RestServlet):
         full_state = parse_boolean(request, "full_state", default=False)
 
         logger.debug(
-            "/sync: user=%r, timeout=%r, since=%r,"
-            " set_presence=%r, filter_id=%r, device_id=%r"
-            % (user, timeout, since, set_presence, filter_id, device_id)
+            "/sync: user=%r, timeout=%r, since=%r, "
+            "set_presence=%r, filter_id=%r, device_id=%r",
+            user,
+            timeout,
+            since,
+            set_presence,
+            filter_id,
+            device_id,
         )
 
         request_key = (user, timeout, since, filter_id, full_state, device_id)
@@ -389,7 +394,7 @@ class SyncRestServlet(RestServlet):
         # We've had bug reports that events were coming down under the
         # wrong room.
         if event.room_id != room.room_id:
-            logger.warn(
+            logger.warning(
                 "Event %r is under room %r instead of %r",
                 event.event_id,
                 room.room_id,

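[Editor's note: the sync.py hunk above replaces eager `%` interpolation with logging's lazy argument passing, so the string is only formatted when a handler actually emits the record. A self-contained comparison:]

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

user, timeout = "@alice:example.org", 30000

# Eager: the % interpolation runs even though DEBUG is filtered out.
logger.debug("/sync: user=%r, timeout=%r" % (user, timeout))

# Lazy: arguments are only formatted if a handler actually emits the record.
logger.debug("/sync: user=%r, timeout=%r", user, timeout)
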
@@ -65,6 +65,9 @@ class VersionsRestServlet(RestServlet):
                     "m.require_identity_server": False,
                     # as per MSC2290
                     "m.separate_add_and_bind": True,
+                    # Implements support for label-based filtering as described in
+                    # MSC2326.
+                    "org.matrix.label_based_filtering": True,
                 },
             },
         )

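[Editor's note: clients discover the MSC2326 flag advertised above via `GET /_matrix/client/versions`. A hedged sketch of such a check, using only the standard library and a hypothetical homeserver URL:]

import json
from urllib.request import urlopen

# Hypothetical homeserver; any Synapse with this commit would do.
url = "https://matrix.example.org/_matrix/client/versions"

with urlopen(url) as resp:
    versions = json.load(resp)

if versions.get("unstable_features", {}).get("org.matrix.label_based_filtering"):
    print("server supports MSC2326 label-based filtering")
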
@@ -102,7 +102,7 @@ class RemoteKey(DirectServeResource):
     @wrap_json_request_handler
     async def _async_render_GET(self, request):
         if len(request.postpath) == 1:
-            server, = request.postpath
+            (server,) = request.postpath
             query = {server.decode("ascii"): {}}
         elif len(request.postpath) == 2:
             server, key_id = request.postpath

Some files were not shown because too many files have changed in this diff.