Merge branch 'develop' into rav/saml_config_cleanup

This commit is contained in:
Richard van der Hoff 2019-09-19 15:05:31 +01:00
commit b789c7eb03
363 changed files with 10632 additions and 5352 deletions

View file

@ -22,9 +22,10 @@ from netaddr import IPAddress
from twisted.internet import defer
import synapse.logging.opentracing as opentracing
import synapse.types
from synapse import event_auth
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.constants import EventTypes, JoinRules, Membership, UserTypes
from synapse.api.errors import (
AuthError,
Codes,
@ -178,6 +179,7 @@ class Auth(object):
def get_public_keys(self, invite_event):
return event_auth.get_public_keys(invite_event)
@opentracing.trace
@defer.inlineCallbacks
def get_user_by_req(
self, request, allow_guest=False, rights="access", allow_expired=False
@ -209,6 +211,7 @@ class Auth(object):
user_id, app_service = yield self._get_appservice_user_id(request)
if user_id:
request.authenticated_entity = user_id
opentracing.set_tag("authenticated_entity", user_id)
if ip_addr and self.hs.config.track_appservice_user_ips:
yield self.store.insert_client_ip(
@ -259,6 +262,7 @@ class Auth(object):
)
request.authenticated_entity = user.to_string()
opentracing.set_tag("authenticated_entity", user.to_string())
return synapse.types.create_requester(
user, token_id, is_guest, device_id, app_service=app_service
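
This hunk decorates get_user_by_req with @opentracing.trace and mirrors each assignment of request.authenticated_entity onto the active span via set_tag, so traces can be filtered by requesting user. A minimal sketch of the same pattern (the function and its return value are hypothetical, not Synapse's):

import synapse.logging.opentracing as opentracing

@opentracing.trace  # opens a span named after the decorated function
def lookup_user(request):
    user_id = "@alice:example.com"  # hypothetical token-lookup result
    request.authenticated_entity = user_id
    # record the same value on the active span
    opentracing.set_tag("authenticated_entity", user_id)
    return user_id
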
@ -272,25 +276,25 @@ class Auth(object):
self.get_access_token_from_request(request)
)
if app_service is None:
return (None, None)
return None, None
if app_service.ip_range_whitelist:
ip_address = IPAddress(self.hs.get_ip_from_request(request))
if ip_address not in app_service.ip_range_whitelist:
return (None, None)
return None, None
if b"user_id" not in request.args:
return (app_service.sender, app_service)
return app_service.sender, app_service
user_id = request.args[b"user_id"][0].decode("utf8")
if app_service.sender == user_id:
return (app_service.sender, app_service)
return app_service.sender, app_service
if not app_service.is_interested_in_user(user_id):
raise AuthError(403, "Application service cannot masquerade as this user.")
if not (yield self.store.get_user_by_id(user_id)):
raise AuthError(403, "Application service has not registered this user")
return (user_id, app_service)
return user_id, app_service
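
A recurring mechanical change in this commit is `return (a, b)` becoming `return a, b`: the comma is what builds the tuple, so the parentheses are redundant, and on Python 3 a bare `return value` inside an inlineCallbacks generator replaces the old `defer.returnValue(value)`. A standalone illustration (assuming Python 3 and Twisted):

from twisted.internet import defer

@defer.inlineCallbacks
def get_appservice_user_id():
    yield defer.succeed(None)  # stand-in for the real database lookups
    # the comma builds the tuple; parentheses add nothing
    return "@as_sender:example.com", "appservice"
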
@defer.inlineCallbacks
def get_user_by_access_token(self, token, rights="access"):
@ -690,7 +694,7 @@ class Auth(object):
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = yield self.check_user_was_in_room(room_id, user_id)
return (member_event.membership, member_event.event_id)
return member_event.membership, member_event.event_id
except AuthError:
visibility = yield self.state.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
@ -699,14 +703,13 @@ class Auth(object):
visibility
and visibility.content["history_visibility"] == "world_readable"
):
return (Membership.JOIN, None)
return
return Membership.JOIN, None
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)
@defer.inlineCallbacks
def check_auth_blocking(self, user_id=None, threepid=None):
def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
"""Checks if the user should be rejected for some external reason,
such as monthly active user limiting or global disable flag
@ -719,6 +722,9 @@ class Auth(object):
with a MAU blocked server, normally they would be rejected but their
threepid is on the reserved list. user_id and
threepid should never be set at the same time.
user_type (str|None): If present, is used to decide whether to check against
certain blocking reasons like MAU.
"""
# Never fail an auth check for the server notices users or support user
@ -756,6 +762,10 @@ class Auth(object):
self.hs.config.mau_limits_reserved_threepids, threepid
):
return
elif user_type == UserTypes.SUPPORT:
# If the user does not exist yet and is of type "support",
# allow registration. Support users are excluded from MAU checks.
return
# Else if there is no room in the MAU bucket, bail
current_mau = yield self.store.get_monthly_active_count()
if current_mau >= self.hs.config.max_mau_value:
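
Putting the pieces of check_auth_blocking together: reserved threepids, and now support users, are exempted before the monthly-active-user cap is enforced. A condensed synchronous sketch of that decision order (names and the error type are illustrative, not Synapse's real signatures):

SUPPORT = "support"

def check_auth_blocking(current_mau, max_mau, user_type=None,
                        threepid=None, reserved_threepids=()):
    """Stand-in for the MAU branch of Auth.check_auth_blocking."""
    if threepid and threepid in reserved_threepids:
        return  # reserved threepids are always let in
    if user_type == SUPPORT:
        return  # support users are excluded from MAU checks
    if current_mau >= max_mau:
        raise RuntimeError("Monthly Active User limit exceeded")

check_auth_blocking(current_mau=50, max_mau=50, user_type=SUPPORT)  # allowed
# check_auth_blocking(current_mau=50, max_mau=50)  # would raise
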

View file

@ -122,7 +122,8 @@ class UserTypes(object):
"""
SUPPORT = "support"
ALL_USER_TYPES = (SUPPORT,)
BOT = "bot"
ALL_USER_TYPES = (SUPPORT, BOT)
class RelationTypes(object):

View file

@ -36,18 +36,20 @@ from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
# list of tuples of function, args list, kwargs dict
_sighup_callbacks = []
def register_sighup(func):
def register_sighup(func, *args, **kwargs):
"""
Register a function to be called when a SIGHUP occurs.
Args:
func (function): Function to be called when sent a SIGHUP signal.
Will be called with a single argument, the homeserver.
Will be called with a single default argument, the homeserver.
*args, **kwargs: args and kwargs to be passed to the target function.
"""
_sighup_callbacks.append(func)
_sighup_callbacks.append((func, args, kwargs))
def start_worker_reactor(appname, config, run_command=reactor.run):
@ -248,8 +250,8 @@ def start(hs, listeners=None):
# we're not using systemd.
sdnotify(b"RELOADING=1")
for i in _sighup_callbacks:
i(hs)
for i, args, kwargs in _sighup_callbacks:
i(hs, *args, **kwargs)
sdnotify(b"READY=1")

View file

@ -227,8 +227,6 @@ def start(config_options):
config.start_pushers = False
config.send_federation = False
setup_logging(config, use_worker_options=True)
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@ -241,6 +239,8 @@ def start(config_options):
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
# We use task.react as the basic run command as it correctly handles tearing

View file

@ -141,8 +141,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.appservice"
setup_logging(config, use_worker_options=True)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@ -167,6 +165,8 @@ def start(config_options):
database_engine=database_engine,
)
setup_logging(ps, config, use_worker_options=True)
ps.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ps, config.worker_listeners

View file

@ -119,7 +119,7 @@ class ClientReaderServer(HomeServer):
KeyChangesServlet(self).register(resource)
VoipRestServlet(self).register(resource)
PushRuleRestServlet(self).register(resource)
VersionsRestServlet().register(resource)
VersionsRestServlet(self).register(resource)
resources.update({"/_matrix/client": resource})
@ -179,8 +179,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.client_reader"
setup_logging(config, use_worker_options=True)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@ -193,6 +191,8 @@ def start(config_options):
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners

View file

@ -175,8 +175,6 @@ def start(config_options):
assert config.worker_replication_http_port is not None
setup_logging(config, use_worker_options=True)
# This should only be done on the user directory worker or the master
config.update_user_directory = False
@ -192,6 +190,8 @@ def start(config_options):
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners

View file

@ -160,8 +160,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.federation_reader"
setup_logging(config, use_worker_options=True)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@ -174,6 +172,8 @@ def start(config_options):
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners

View file

@ -171,8 +171,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.federation_sender"
setup_logging(config, use_worker_options=True)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@ -197,6 +195,8 @@ def start(config_options):
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners

View file

@ -70,12 +70,12 @@ class PresenceStatusStubServlet(RestServlet):
except HttpResponseException as e:
raise e.to_synapse_error()
return (200, result)
return 200, result
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
yield self.auth.get_user_by_req(request)
return (200, {})
return 200, {}
class KeyUploadServlet(RestServlet):
@ -126,11 +126,11 @@ class KeyUploadServlet(RestServlet):
self.main_uri + request.uri.decode("ascii"), body, headers=headers
)
return (200, result)
return 200, result
else:
# Just interested in counts.
result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
return (200, {"one_time_key_counts": result})
return 200, {"one_time_key_counts": result}
class FrontendProxySlavedStore(
@ -232,8 +232,6 @@ def start(config_options):
assert config.worker_main_http_uri is not None
setup_logging(config, use_worker_options=True)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@ -246,6 +244,8 @@ def start(config_options):
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners

View file

@ -341,8 +341,6 @@ def setup(config_options):
# generating config files and shouldn't try to continue.
sys.exit(0)
synapse.config.logger.setup_logging(config, use_worker_options=False)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@ -356,6 +354,8 @@ def setup(config_options):
database_engine=database_engine,
)
synapse.config.logger.setup_logging(hs, config, use_worker_options=False)
logger.info("Preparing database: %s...", config.database_config["name"])
try:
@ -561,10 +561,12 @@ def run(hs):
stats["database_engine"] = hs.get_datastore().database_engine_name
stats["database_server_version"] = hs.get_datastore().get_server_version()
logger.info("Reporting stats to matrix.org: %s" % (stats,))
logger.info(
"Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)
)
try:
yield hs.get_simple_http_client().put_json(
"https://matrix.org/report-usage-stats/push", stats
hs.config.report_stats_endpoint, stats
)
except Exception as e:
logger.warn("Error reporting stats: %s", e)

View file

@ -155,8 +155,6 @@ def start(config_options):
"Please add ``enable_media_repo: false`` to the main config\n"
)
setup_logging(config, use_worker_options=True)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@ -169,6 +167,8 @@ def start(config_options):
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners

View file

@ -184,8 +184,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.pusher"
setup_logging(config, use_worker_options=True)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
if config.start_pushers:
@ -210,6 +208,8 @@ def start(config_options):
database_engine=database_engine,
)
setup_logging(ps, config, use_worker_options=True)
ps.setup()
def start():

View file

@ -435,8 +435,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.synchrotron"
setup_logging(config, use_worker_options=True)
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@ -450,6 +448,8 @@ def start(config_options):
application_service_handler=SynchrotronApplicationService(),
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners

View file

@ -197,8 +197,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.user_dir"
setup_logging(config, use_worker_options=True)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@ -223,6 +221,8 @@ def start(config_options):
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners

View file

@ -107,7 +107,6 @@ class ApplicationServiceApi(SimpleHttpClient):
except CodeMessageException as e:
if e.code == 404:
return False
return
logger.warning("query_user to %s received %s", uri, e.code)
except Exception as ex:
logger.warning("query_user to %s threw exception %s", uri, ex)
@ -127,7 +126,6 @@ class ApplicationServiceApi(SimpleHttpClient):
logger.warning("query_alias to %s received %s", uri, e.code)
if e.code == 404:
return False
return
except Exception as ex:
logger.warning("query_alias to %s threw exception %s", uri, ex)
return False
@ -230,7 +228,6 @@ class ApplicationServiceApi(SimpleHttpClient):
sent_transactions_counter.labels(service.id).inc()
sent_events_counter.labels(service.id).inc(len(events))
return True
return
except CodeMessageException as e:
logger.warning("push_bulk to %s received %s", uri, e.code)
except Exception as ex:

View file

@ -70,35 +70,37 @@ class ApplicationServiceScheduler(object):
self.store = hs.get_datastore()
self.as_api = hs.get_application_service_api()
def create_recoverer(service, callback):
return _Recoverer(self.clock, self.store, self.as_api, service, callback)
self.txn_ctrl = _TransactionController(
self.clock, self.store, self.as_api, create_recoverer
)
self.txn_ctrl = _TransactionController(self.clock, self.store, self.as_api)
self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock)
@defer.inlineCallbacks
def start(self):
logger.info("Starting appservice scheduler")
# check for any DOWN ASes and start recoverers for them.
recoverers = yield _Recoverer.start(
self.clock, self.store, self.as_api, self.txn_ctrl.on_recovered
services = yield self.store.get_appservices_by_state(
ApplicationServiceState.DOWN
)
self.txn_ctrl.add_recoverers(recoverers)
for service in services:
self.txn_ctrl.start_recoverer(service)
def submit_event_for_as(self, service, event):
self.queuer.enqueue(service, event)
class _ServiceQueuer(object):
"""Queues events for the same application service together, sending
transactions as soon as possible. Once a transaction is sent successfully,
this schedules any other events in the queue to run.
"""Queue of events waiting to be sent to appservices.
Groups events into transactions per-appservice, and sends them on to the
TransactionController. Makes sure that we only have one transaction in flight per
appservice at a given time.
"""
def __init__(self, txn_ctrl, clock):
self.queued_events = {} # dict of {service_id: [events]}
# the appservices which currently have a transaction in flight
self.requests_in_flight = set()
self.txn_ctrl = txn_ctrl
self.clock = clock
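
The queuer's contract, per the new docstring: events are grouped per appservice, and at most one transaction per appservice is in flight; whatever queues up during a send goes out on the next pass. A stripped-down synchronous sketch of that bookkeeping (the real class drives this with deferreds):

class ServiceQueuer:
    def __init__(self, send_fn):
        self.queued_events = {}          # service_id -> [events]
        self.requests_in_flight = set()  # services with a txn in flight
        self.send_fn = send_fn

    def enqueue(self, service_id, event):
        self.queued_events.setdefault(service_id, []).append(event)
        self._send_next(service_id)

    def _send_next(self, service_id):
        if service_id in self.requests_in_flight:
            return  # one transaction per appservice at a time
        events = self.queued_events.pop(service_id, [])
        if not events:
            return
        self.requests_in_flight.add(service_id)
        try:
            self.send_fn(service_id, events)  # one transaction
        finally:
            self.requests_in_flight.discard(service_id)
            self._send_next(service_id)  # drain anything queued meanwhile

q = ServiceQueuer(lambda sid, evs: print("txn for %s: %s" % (sid, evs)))
q.enqueue("irc_bridge", "event1")
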
@ -136,13 +138,29 @@ class _ServiceQueuer(object):
class _TransactionController(object):
def __init__(self, clock, store, as_api, recoverer_fn):
"""Transaction manager.
Builds AppServiceTransactions and runs their lifecycle. Also starts a Recoverer
if a transaction fails.
(Note we only have one of these in the homeserver.)
Args:
clock (synapse.util.Clock):
store (synapse.storage.DataStore):
as_api (synapse.appservice.api.ApplicationServiceApi):
"""
def __init__(self, clock, store, as_api):
self.clock = clock
self.store = store
self.as_api = as_api
self.recoverer_fn = recoverer_fn
# keep track of how many recoverers there are
self.recoverers = []
# map from service id to recoverer instance
self.recoverers = {}
# for UTs
self.RECOVERER_CLASS = _Recoverer
@defer.inlineCallbacks
def send(self, service, events):
@ -154,42 +172,45 @@ class _TransactionController(object):
if sent:
yield txn.complete(self.store)
else:
run_in_background(self._start_recoverer, service)
run_in_background(self._on_txn_fail, service)
except Exception:
logger.exception("Error creating appservice transaction")
run_in_background(self._start_recoverer, service)
run_in_background(self._on_txn_fail, service)
@defer.inlineCallbacks
def on_recovered(self, recoverer):
self.recoverers.remove(recoverer)
logger.info(
"Successfully recovered application service AS ID %s", recoverer.service.id
)
self.recoverers.pop(recoverer.service.id)
logger.info("Remaining active recoverers: %s", len(self.recoverers))
yield self.store.set_appservice_state(
recoverer.service, ApplicationServiceState.UP
)
def add_recoverers(self, recoverers):
for r in recoverers:
self.recoverers.append(r)
if len(recoverers) > 0:
logger.info("New active recoverers: %s", len(self.recoverers))
@defer.inlineCallbacks
def _start_recoverer(self, service):
def _on_txn_fail(self, service):
try:
yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN)
logger.info(
"Application service falling behind. Starting recoverer. AS ID %s",
service.id,
)
recoverer = self.recoverer_fn(service, self.on_recovered)
self.add_recoverers([recoverer])
recoverer.recover()
self.start_recoverer(service)
except Exception:
logger.exception("Error starting AS recoverer")
def start_recoverer(self, service):
"""Start a Recoverer for the given service
Args:
service (synapse.appservice.ApplicationService):
"""
logger.info("Starting recoverer for AS ID %s", service.id)
assert service.id not in self.recoverers
recoverer = self.RECOVERER_CLASS(
self.clock, self.store, self.as_api, service, self.on_recovered
)
self.recoverers[service.id] = recoverer
recoverer.recover()
logger.info("Now %i active recoverers", len(self.recoverers))
@defer.inlineCallbacks
def _is_service_up(self, service):
state = yield self.store.get_appservice_state(service)
@ -197,18 +218,17 @@ class _TransactionController(object):
class _Recoverer(object):
@staticmethod
@defer.inlineCallbacks
def start(clock, store, as_api, callback):
services = yield store.get_appservices_by_state(ApplicationServiceState.DOWN)
recoverers = [_Recoverer(clock, store, as_api, s, callback) for s in services]
for r in recoverers:
logger.info(
"Starting recoverer for AS ID %s which was marked as " "DOWN",
r.service.id,
)
r.recover()
return recoverers
"""Manages retries and backoff for a DOWN appservice.
We have one of these for each appservice which is currently considered DOWN.
Args:
clock (synapse.util.Clock):
store (synapse.storage.DataStore):
as_api (synapse.appservice.api.ApplicationServiceApi):
service (synapse.appservice.ApplicationService): the service we are managing
callback (callable[_Recoverer]): called once the service recovers.
"""
def __init__(self, clock, store, as_api, service, callback):
self.clock = clock
@ -224,7 +244,9 @@ class _Recoverer(object):
"as-recoverer-%s" % (self.service.id,), self.retry
)
self.clock.call_later((2 ** self.backoff_counter), _retry)
delay = 2 ** self.backoff_counter
logger.info("Scheduling retries on %s in %fs", self.service.id, delay)
self.clock.call_later(delay, _retry)
def _backoff(self):
# cap the backoff to be around 8.5min => (2^9) = 512 secs
@ -234,25 +256,30 @@ class _Recoverer(object):
@defer.inlineCallbacks
def retry(self):
logger.info("Starting retries on %s", self.service.id)
try:
txn = yield self.store.get_oldest_unsent_txn(self.service)
if txn:
while True:
txn = yield self.store.get_oldest_unsent_txn(self.service)
if not txn:
# nothing left: we're done!
self.callback(self)
return
logger.info(
"Retrying transaction %s for AS ID %s", txn.id, txn.service.id
)
sent = yield txn.send(self.as_api)
if sent:
yield txn.complete(self.store)
# reset the backoff counter and retry immediately
self.backoff_counter = 1
yield self.retry()
else:
self._backoff()
else:
self._set_service_recovered()
except Exception as e:
logger.exception(e)
self._backoff()
if not sent:
break
def _set_service_recovered(self):
self.callback(self)
yield txn.complete(self.store)
# reset the backoff counter and then process the next transaction
self.backoff_counter = 1
except Exception:
logger.exception("Unexpected error running retries")
# we didn't manage to send all of the transactions before we got an error of
# some flavour: reschedule the next retry.
self._backoff()
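
The retry cadence is plain exponential backoff: recover() schedules a pass after 2**backoff_counter seconds, a failed pass bumps the counter (capped so the delay tops out at 2**9 = 512s), and a fully drained queue resets it and reports the service recovered. The schedule itself, as a tiny sketch:

def backoff_delays(max_exponent=9):
    """Yield the retry delays _Recoverer uses: 2, 4, 8, ... capped at 512s."""
    counter = 1
    while True:
        yield 2 ** counter
        if counter < max_exponent:
            counter += 1

delays = backoff_delays()
print([next(delays) for _ in range(10)])
# [2, 4, 8, 16, 32, 64, 128, 256, 512, 512]
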

View file

@ -13,8 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import ConfigError
from ._base import ConfigError, find_config_files
# export ConfigError if somebody does import *
# export ConfigError and find_config_files if somebody does
# import *
# this is largely a fudge to stop PEP8 moaning about the import
__all__ = ["ConfigError"]
__all__ = ["ConfigError", "find_config_files"]

View file

@ -181,6 +181,11 @@ class Config(object):
generate_secrets=False,
report_stats=None,
open_private_ports=False,
listeners=None,
database_conf=None,
tls_certificate_path=None,
tls_private_key_path=None,
acme_domain=None,
):
"""Build a default configuration file
@ -207,6 +212,33 @@ class Config(object):
open_private_ports (bool): True to leave private ports (such as the non-TLS
HTTP listener) open to the internet.
listeners (list(dict)|None): A list of descriptions of the listeners
synapse should start with, each of which specifies a port (int), a list of
resources (list(str)), tls (bool) and type (str). For example:
[{
"port": 8448,
"resources": [{"names": ["federation"]}],
"tls": True,
"type": "http",
},
{
"port": 443,
"resources": [{"names": ["client"]}],
"tls": False,
"type": "http",
}],
database_conf (dict|None): A database configuration to use, with either
the `psycopg2` or `sqlite3` engine.
tls_certificate_path (str|None): The path to the tls certificate.
tls_private_key_path (str|None): The path to the tls private key.
acme_domain (str|None): The domain acme will try to validate. If
specified acme will be enabled.
Returns:
str: the yaml config file
"""
@ -220,6 +252,11 @@ class Config(object):
generate_secrets=generate_secrets,
report_stats=report_stats,
open_private_ports=open_private_ports,
listeners=listeners,
database_conf=database_conf,
tls_certificate_path=tls_certificate_path,
tls_private_key_path=tls_private_key_path,
acme_domain=acme_domain,
)
)

View file

@ -13,6 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from textwrap import indent
import yaml
from ._base import Config
@ -38,20 +41,28 @@ class DatabaseConfig(Config):
self.set_databasepath(config.get("database_path"))
def generate_config_section(self, data_dir_path, **kwargs):
database_path = os.path.join(data_dir_path, "homeserver.db")
return (
"""\
## Database ##
database:
# The database engine name
def generate_config_section(self, data_dir_path, database_conf, **kwargs):
if not database_conf:
database_path = os.path.join(data_dir_path, "homeserver.db")
database_conf = (
"""# The database engine name
name: "sqlite3"
# Arguments to pass to the engine
args:
# Path to the database
database: "%(database_path)s"
"""
% locals()
)
else:
database_conf = indent(yaml.dump(database_conf), " " * 10).lstrip()
return (
"""\
## Database ##
database:
%(database_conf)s
# Number of events to cache in memory.
#
#event_cache_size: 10K
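
The formatting trick here: yaml.dump renders the database_conf dict as YAML, indent() pushes every line to the template's nesting depth, and lstrip() removes the prefix from the first line since the template already supplies it. In isolation (assuming PyYAML's default block style):

import yaml
from textwrap import indent

database_conf = {
    "name": "psycopg2",
    "args": {"user": "synapse", "database": "synapse"},
}

block = indent(yaml.dump(database_conf), " " * 10).lstrip()
print("database:\n          %s" % block)
# database:
#           args:
#             database: synapse
#             user: synapse
#           name: psycopg2
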

View file

@ -20,6 +20,7 @@ from __future__ import print_function
# This file can't be called email.py because if it is, we cannot:
import email.utils
import os
from enum import Enum
import pkg_resources
@ -74,19 +75,48 @@ class EmailConfig(Config):
"renew_at"
)
email_trust_identity_server_for_password_resets = email_config.get(
"trust_identity_server_for_password_resets", False
self.threepid_behaviour_email = (
# Have Synapse handle the email sending if account_threepid_delegates.email
# is not defined
# msisdn is currently always remote while Synapse does not support any method of
# sending SMS messages
ThreepidBehaviour.REMOTE
if self.account_threepid_delegate_email
else ThreepidBehaviour.LOCAL
)
self.email_password_reset_behaviour = (
"remote" if email_trust_identity_server_for_password_resets else "local"
)
self.password_resets_were_disabled_due_to_email_config = False
if self.email_password_reset_behaviour == "local" and email_config == {}:
# Prior to Synapse v1.4.0, there was another option that defined whether Synapse would
# use an identity server to send password reset tokens on its behalf. We now warn the user
# if they have this set and tell them to use the updated option, while using a default
# identity server in the process.
self.using_identity_server_from_trusted_list = False
if (
not self.account_threepid_delegate_email
and config.get("trust_identity_server_for_password_resets", False) is True
):
# Use the first entry in self.trusted_third_party_id_servers instead
if self.trusted_third_party_id_servers:
# XXX: It's a little confusing that account_threepid_delegate_email is modified
# both in RegistrationConfig and here. We should factor this bit out
self.account_threepid_delegate_email = self.trusted_third_party_id_servers[
0
]
self.using_identity_server_from_trusted_list = True
else:
raise ConfigError(
"Attempted to use an identity server from"
'"trusted_third_party_id_servers" but it is empty.'
)
self.local_threepid_handling_disabled_due_to_email_config = False
if (
self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
and email_config == {}
):
# We cannot warn the user this has happened here
# Instead do so when a user attempts to reset their password
self.password_resets_were_disabled_due_to_email_config = True
self.local_threepid_handling_disabled_due_to_email_config = True
self.email_password_reset_behaviour = "off"
self.threepid_behaviour_email = ThreepidBehaviour.OFF
# Get lifetime of a validation token in milliseconds
self.email_validation_token_lifetime = self.parse_duration(
@ -96,7 +126,7 @@ class EmailConfig(Config):
if (
self.email_enable_notifs
or account_validity_renewal_enabled
or self.email_password_reset_behaviour == "local"
or self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
):
# make sure we can import the required deps
import jinja2
@ -106,7 +136,7 @@ class EmailConfig(Config):
jinja2
bleach
if self.email_password_reset_behaviour == "local":
if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
required = ["smtp_host", "smtp_port", "notif_from"]
missing = []
@ -115,7 +145,7 @@ class EmailConfig(Config):
missing.append("email." + k)
if config.get("public_baseurl") is None:
missing.append("public_base_url")
missing.append("public_baseurl")
if len(missing) > 0:
raise RuntimeError(
@ -125,28 +155,45 @@ class EmailConfig(Config):
% (", ".join(missing),)
)
# Templates for password reset emails
# These email templates have placeholders in them, and thus must be
# parsed using a templating engine during a request
self.email_password_reset_template_html = email_config.get(
"password_reset_template_html", "password_reset.html"
)
self.email_password_reset_template_text = email_config.get(
"password_reset_template_text", "password_reset.txt"
)
self.email_password_reset_failure_template = email_config.get(
"password_reset_failure_template", "password_reset_failure.html"
self.email_registration_template_html = email_config.get(
"registration_template_html", "registration.html"
)
# This template does not support any replaceable variables, so we will
# read it from the disk once during setup
email_password_reset_success_template = email_config.get(
"password_reset_success_template", "password_reset_success.html"
self.email_registration_template_text = email_config.get(
"registration_template_text", "registration.txt"
)
self.email_password_reset_template_failure_html = email_config.get(
"password_reset_template_failure_html", "password_reset_failure.html"
)
self.email_registration_template_failure_html = email_config.get(
"registration_template_failure_html", "registration_failure.html"
)
# These templates do not support any placeholder variables, so we
# will read them from disk once during setup
email_password_reset_template_success_html = email_config.get(
"password_reset_template_success_html", "password_reset_success.html"
)
email_registration_template_success_html = email_config.get(
"registration_template_success_html", "registration_success.html"
)
# Check templates exist
for f in [
self.email_password_reset_template_html,
self.email_password_reset_template_text,
self.email_password_reset_failure_template,
email_password_reset_success_template,
self.email_registration_template_html,
self.email_registration_template_text,
self.email_password_reset_template_failure_html,
email_password_reset_template_success_html,
email_registration_template_success_html,
]:
p = os.path.join(self.email_template_dir, f)
if not os.path.isfile(p):
@ -154,11 +201,17 @@ class EmailConfig(Config):
# Retrieve content of web templates
filepath = os.path.join(
self.email_template_dir, email_password_reset_success_template
self.email_template_dir, email_password_reset_template_success_html
)
self.email_password_reset_success_html_content = self.read_file(
self.email_password_reset_template_success_html = self.read_file(
filepath, "email.password_reset_template_success_html"
)
filepath = os.path.join(
self.email_template_dir, email_registration_template_success_html
)
self.email_registration_template_success_html_content = self.read_file(
filepath, "email.registration_template_success_html"
)
if self.email_enable_notifs:
required = [
@ -239,19 +292,6 @@ class EmailConfig(Config):
# #
# riot_base_url: "http://localhost/riot"
#
# # Enable sending password reset emails via the configured, trusted
# # identity servers
# #
# # IMPORTANT! This will give a malicious or overtaken identity server
# # the ability to reset passwords for your users! Make absolutely sure
# # that you want to do this! It is strongly recommended that password
# # reset emails be sent by the homeserver instead
# #
# # If this option is set to false and SMTP options have not been
# # configured, resetting user passwords via email will be disabled
# #
# #trust_identity_server_for_password_resets: false
#
# # Configure the time that a validation email or text message code
# # will expire after sending
# #
@ -283,9 +323,35 @@ class EmailConfig(Config):
# #password_reset_template_html: password_reset.html
# #password_reset_template_text: password_reset.txt
#
# # Templates for registration emails sent by the homeserver
# #
# #registration_template_html: registration.html
# #registration_template_text: registration.txt
#
# # Templates for password reset success and failure pages that a user
# # will see after attempting to reset their password
# #
# #password_reset_template_success_html: password_reset_success.html
# #password_reset_template_failure_html: password_reset_failure.html
#
# # Templates for registration success and failure pages that a user
# # will see after attempting to register using an email or phone
# #
# #registration_template_success_html: registration_success.html
# #registration_template_failure_html: registration_failure.html
"""
class ThreepidBehaviour(Enum):
"""
Enum to define the behaviour of Synapse with regards to when it contacts an identity
server for 3pid registration and password resets
REMOTE = use an external server to send tokens
LOCAL = send tokens ourselves
OFF = disable registration via 3pid and password resets
"""
REMOTE = "remote"
LOCAL = "local"
OFF = "off"

View file

@ -76,7 +76,7 @@ class KeyConfig(Config):
config_dir_path, config["server_name"] + ".signing.key"
)
self.signing_key = self.read_signing_key(signing_key_path)
self.signing_key = self.read_signing_keys(signing_key_path, "signing_key")
self.old_signing_keys = self.read_old_signing_keys(
config.get("old_signing_keys", {})
@ -85,6 +85,14 @@ class KeyConfig(Config):
config.get("key_refresh_interval", "1d")
)
key_server_signing_keys_path = config.get("key_server_signing_keys_path")
if key_server_signing_keys_path:
self.key_server_signing_keys = self.read_signing_keys(
key_server_signing_keys_path, "key_server_signing_keys_path"
)
else:
self.key_server_signing_keys = list(self.signing_key)
# if neither trusted_key_servers nor perspectives are given, use the default.
if "perspectives" not in config and "trusted_key_servers" not in config:
key_servers = [{"server_name": "matrix.org"}]
@ -210,16 +218,34 @@ class KeyConfig(Config):
#
#trusted_key_servers:
# - server_name: "matrix.org"
#
# The signing keys to use when acting as a trusted key server. If not specified
# defaults to the server signing key.
#
# Can contain multiple keys, one per line.
#
#key_server_signing_keys_path: "key_server_signing_keys.key"
"""
% locals()
)
def read_signing_key(self, signing_key_path):
signing_keys = self.read_file(signing_key_path, "signing_key")
def read_signing_keys(self, signing_key_path, name):
"""Read the signing keys in the given path.
Args:
signing_key_path (str)
name (str): Associated config key name
Returns:
list[SigningKey]
"""
signing_keys = self.read_file(signing_key_path, name)
try:
return read_signing_keys(signing_keys.splitlines(True))
except Exception as e:
raise ConfigError("Error reading signing_key: %s" % (str(e)))
raise ConfigError("Error reading %s: %s" % (name, str(e)))
def read_old_signing_keys(self, old_signing_keys):
keys = {}

View file

@ -21,10 +21,19 @@ from string import Template
import yaml
from twisted.logger import STDLibLogObserver, globalLogBeginner
from twisted.logger import (
ILogObserver,
LogBeginner,
STDLibLogObserver,
globalLogBeginner,
)
import synapse
from synapse.app import _base as appbase
from synapse.logging._structured import (
reload_structured_logging,
setup_structured_logging,
)
from synapse.logging.context import LoggingContextFilter
from synapse.util.versionstring import get_version_string
@ -85,7 +94,8 @@ class LoggingConfig(Config):
"""\
## Logging ##
# A yaml python logging config file
# A yaml python logging config file as described by
# https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
#
log_config: "%(log_config)s"
"""
@ -119,21 +129,10 @@ class LoggingConfig(Config):
log_config_file.write(DEFAULT_LOG_CONFIG.substitute(log_file=log_file))
def setup_logging(config, use_worker_options=False):
""" Set up python logging
Args:
config (LoggingConfig | synapse.config.workers.WorkerConfig):
configuration data
use_worker_options (bool): True to use the 'worker_log_config' option
instead of 'log_config'.
register_sighup (func | None): Function to call to register a
sighup handler.
def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner):
"""
Set up Python stdlib logging.
"""
log_config = config.worker_log_config if use_worker_options else config.log_config
if log_config is None:
log_format = (
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
@ -151,35 +150,10 @@ def setup_logging(config, use_worker_options=False):
handler.addFilter(LoggingContextFilter(request=""))
logger.addHandler(handler)
else:
logging.config.dictConfig(log_config)
def load_log_config():
with open(log_config, "r") as f:
logging.config.dictConfig(yaml.safe_load(f))
def sighup(*args):
# it might be better to use a file watcher or something for this.
load_log_config()
logging.info("Reloaded log config from %s due to SIGHUP", log_config)
load_log_config()
appbase.register_sighup(sighup)
# make sure that the first thing we log is a thing we can grep backwards
# for
logging.warn("***** STARTING SERVER *****")
logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse))
logging.info("Server hostname: %s", config.server_name)
# It's critical to point twisted's internal logging somewhere, otherwise it
# stacks up and leaks up to 64K objects;
# see: https://twistedmatrix.com/trac/ticket/8164
#
# Routing to the python logging framework could be a performance problem if
# the handlers blocked for a long time as python.logging is a blocking API
# see https://twistedmatrix.com/documents/current/core/howto/logger.html
# filed as https://github.com/matrix-org/synapse/issues/1727
#
# However this may not be too much of a problem if we are just writing to a file.
# Route Twisted's native logging through to the standard library logging
# system.
observer = STDLibLogObserver()
def _log(event):
@ -196,8 +170,71 @@ def setup_logging(config, use_worker_options=False):
return observer(event)
globalLogBeginner.beginLoggingTo(
[_log], redirectStandardIO=not config.no_redirect_stdio
)
logBeginner.beginLoggingTo([_log], redirectStandardIO=not config.no_redirect_stdio)
if not config.no_redirect_stdio:
print("Redirected stdout/stderr to logs")
return observer
def _reload_stdlib_logging(*args, log_config=None):
logger = logging.getLogger("")
if not log_config:
logger.warn("Reloaded a blank config?")
logging.config.dictConfig(log_config)
def setup_logging(
hs, config, use_worker_options=False, logBeginner: LogBeginner = globalLogBeginner
) -> ILogObserver:
"""
Set up the logging subsystem.
Args:
config (LoggingConfig | synapse.config.workers.WorkerConfig):
configuration data
use_worker_options (bool): True to use the 'worker_log_config' option
instead of 'log_config'.
logBeginner: The Twisted logBeginner to use.
Returns:
The "root" Twisted Logger observer, suitable for sending logs to from a
Logger instance.
"""
log_config = config.worker_log_config if use_worker_options else config.log_config
def read_config(*args, callback=None):
if log_config is None:
return None
with open(log_config, "rb") as f:
log_config_body = yaml.safe_load(f.read())
if callback:
callback(log_config=log_config_body)
logging.info("Reloaded log config from %s due to SIGHUP", log_config)
return log_config_body
log_config_body = read_config()
if log_config_body and log_config_body.get("structured") is True:
logger = setup_structured_logging(
hs, config, log_config_body, logBeginner=logBeginner
)
appbase.register_sighup(read_config, callback=reload_structured_logging)
else:
logger = _setup_stdlib_logging(config, log_config_body, logBeginner=logBeginner)
appbase.register_sighup(read_config, callback=_reload_stdlib_logging)
# make sure that the first thing we log is a thing we can grep backwards
# for
logging.warn("***** STARTING SERVER *****")
logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse))
logging.info("Server hostname: %s", config.server_name)
return logger
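
The new dispatch in setup_logging: read the YAML log config, branch on a top-level `structured: true` key, and re-register the reader as a SIGHUP callback so the same code path handles reloads. The branch, reduced to a runnable sketch (the inline YAML is illustrative):

import logging.config

import yaml

log_config_body = yaml.safe_load("""
version: 1
structured: false
root:
  level: INFO
  handlers: []
""")

if log_config_body and log_config_body.get("structured") is True:
    # would hand off to synapse.logging._structured.setup_structured_logging
    raise NotImplementedError("structured logging not shown here")
else:
    # stdlib path: feed the whole body to dictConfig, as
    # _reload_stdlib_logging does (unknown keys are ignored)
    logging.config.dictConfig(log_config_body)
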

View file

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -13,26 +14,47 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import attr
from synapse.python_dependencies import DependencyException, check_requirements
from ._base import Config, ConfigError
MISSING_SENTRY = """Missing sentry-sdk library. This is required to enable sentry
integration.
"""
@attr.s
class MetricsFlags(object):
known_servers = attr.ib(default=False, validator=attr.validators.instance_of(bool))
@classmethod
def all_off(cls):
"""
Instantiate the flags with all options set to off.
"""
return cls(**{x.name: False for x in attr.fields(cls)})
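
A note on the all_off() trick: because it iterates attr.fields(cls), it stays correct as flags are added, and the attrs validator guarantees each flag is a real bool. Repeating the class so the snippet runs standalone:

import attr

@attr.s
class MetricsFlags(object):
    known_servers = attr.ib(default=False, validator=attr.validators.instance_of(bool))

    @classmethod
    def all_off(cls):
        return cls(**{x.name: False for x in attr.fields(cls)})

print(MetricsFlags.all_off())       # MetricsFlags(known_servers=False)
# MetricsFlags(known_servers="yes") # would raise TypeError via the validator
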
class MetricsConfig(Config):
def read_config(self, config, **kwargs):
self.enable_metrics = config.get("enable_metrics", False)
self.report_stats = config.get("report_stats", None)
self.report_stats_endpoint = config.get(
"report_stats_endpoint", "https://matrix.org/report-usage-stats/push"
)
self.metrics_port = config.get("metrics_port")
self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")
if self.enable_metrics:
_metrics_config = config.get("metrics_flags") or {}
self.metrics_flags = MetricsFlags(**_metrics_config)
else:
self.metrics_flags = MetricsFlags.all_off()
self.sentry_enabled = "sentry" in config
if self.sentry_enabled:
try:
import sentry_sdk # noqa F401
except ImportError:
raise ConfigError(MISSING_SENTRY)
check_requirements("sentry")
except DependencyException as e:
raise ConfigError(e.message)
self.sentry_dsn = config["sentry"].get("dsn")
if not self.sentry_dsn:
@ -58,6 +80,16 @@ class MetricsConfig(Config):
#sentry:
# dsn: "..."
# Flags to enable Prometheus metrics which are not suitable to be
# enabled by default, either for performance reasons or limited use.
#
metrics_flags:
# Publish synapse_federation_known_servers, a gauge of the number of
# servers this homeserver knows about, including itself. May cause
# performance problems on large homeservers.
#
#known_servers: true
# Whether or not to report anonymized homeserver usage statistics.
"""
@ -66,4 +98,10 @@ class MetricsConfig(Config):
else:
res += "report_stats: %s\n" % ("true" if report_stats else "false")
res += """
# The endpoint to report the anonymized homeserver usage statistics to.
# Defaults to https://matrix.org/report-usage-stats/push
#
#report_stats_endpoint: https://example.com/report-usage-stats/push
"""
return res

View file

@ -80,6 +80,12 @@ class RatelimitConfig(Config):
"federation_rr_transactions_per_room_per_second", 50
)
rc_admin_redaction = config.get("rc_admin_redaction")
if rc_admin_redaction:
self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction)
else:
self.rc_admin_redaction = None
def generate_config_section(self, **kwargs):
return """\
## Ratelimiting ##
@ -102,6 +108,9 @@ class RatelimitConfig(Config):
# - one for login that ratelimits login requests based on the account the
# client is attempting to log into, based on the amount of failed login
# attempts for this account.
# - one for ratelimiting redactions by room admins. If this is not explicitly
# set then it uses the same ratelimiting as per rc_message. This is useful
# to allow room admins to deal with abuse quickly.
#
# The defaults are as shown below.
#
@ -123,6 +132,10 @@ class RatelimitConfig(Config):
# failed_attempts:
# per_second: 0.17
# burst_count: 3
#
#rc_admin_redaction:
# per_second: 1
# burst_count: 50
# Ratelimiting settings for incoming federation

View file

@ -99,6 +99,10 @@ class RegistrationConfig(Config):
self.trusted_third_party_id_servers = config.get(
"trusted_third_party_id_servers", ["matrix.org", "vector.im"]
)
account_threepid_delegates = config.get("account_threepid_delegates") or {}
self.account_threepid_delegate_email = account_threepid_delegates.get("email")
self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
self.default_identity_server = config.get("default_identity_server")
self.allow_guest_access = config.get("allow_guest_access", False)
@ -257,10 +261,42 @@ class RegistrationConfig(Config):
# Also defines the ID server which will be called when an account is
# deactivated (one will be picked arbitrarily).
#
# Note: This option is deprecated. Since v0.99.4, Synapse has tracked which identity
# server a 3PID has been bound to. For 3PIDs bound before then, Synapse runs a
# background migration script, informing itself that the identity server all of its
# 3PIDs have been bound to is likely one of the below.
#
# As of Synapse v1.4.0, all other functionality of this option has been deprecated, and
# it is now solely used for the purposes of the background migration script, and can be
# removed once it has run.
#trusted_third_party_id_servers:
# - matrix.org
# - vector.im
# Handle threepid (email/phone etc) registration and password resets through a set of
# *trusted* identity servers. Note that this allows the configured identity server to
# reset passwords for accounts!
#
# Be aware that if `email` is not set, and SMTP options have not been
# configured in the email config block, registration and user password resets via
# email will be globally disabled.
#
# Additionally, if `msisdn` is not set, registration and password resets via msisdn
# will be disabled regardless. This is due to Synapse currently not supporting any
# method of sending SMS messages on its own.
#
# To enable using an identity server for operations regarding a particular third-party
# identifier type, set the value to the URL of that identity server as shown in the
# examples below.
#
# Servers handling these requests must answer the `/requestToken` endpoints defined
# by the Matrix Identity Service API specification:
# https://matrix.org/docs/spec/identity_service/latest
#
account_threepid_delegates:
#email: https://example.com # Delegate email sending to matrix.org
#msisdn: http://localhost:8090 # Delegate SMS sending to this local process
# Users who register on this homeserver will automatically be joined
# to these rooms
#

View file

@ -16,6 +16,7 @@
import os
from collections import namedtuple
from synapse.python_dependencies import DependencyException, check_requirements
from synapse.util.module_loader import load_module
from ._base import Config, ConfigError
@ -34,17 +35,6 @@ THUMBNAIL_SIZE_YAML = """\
# method: %(method)s
"""
MISSING_NETADDR = "Missing netaddr library. This is required for URL preview API."
MISSING_LXML = """Missing lxml library. This is required for URL preview API.
Install by running:
pip install lxml
Requires libxslt1-dev system package.
"""
ThumbnailRequirement = namedtuple(
"ThumbnailRequirement", ["width", "height", "method", "media_type"]
)
@ -171,16 +161,10 @@ class ContentRepositoryConfig(Config):
self.url_preview_enabled = config.get("url_preview_enabled", False)
if self.url_preview_enabled:
try:
import lxml
check_requirements("url_preview")
lxml # To stop unused lint.
except ImportError:
raise ConfigError(MISSING_LXML)
try:
from netaddr import IPSet
except ImportError:
raise ConfigError(MISSING_NETADDR)
except DependencyException as e:
raise ConfigError(e.message)
if "url_preview_ip_range_blacklist" not in config:
raise ConfigError(
@ -189,6 +173,9 @@ class ContentRepositoryConfig(Config):
"to work"
)
# netaddr is a dependency for url_preview
from netaddr import IPSet
self.url_preview_ip_range_blacklist = IPSet(
config["url_preview_ip_range_blacklist"]
)

View file

@ -17,8 +17,11 @@
import logging
import os.path
import re
from textwrap import indent
import attr
import yaml
from netaddr import IPSet
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
@ -159,6 +162,16 @@ class ServerConfig(Config):
self.mau_trial_days = config.get("mau_trial_days", 0)
# How long to keep redacted events in the database in unredacted form
# before redacting them.
redaction_retention_period = config.get("redaction_retention_period", "7d")
if redaction_retention_period is not None:
self.redaction_retention_period = self.parse_duration(
redaction_retention_period
)
else:
self.redaction_retention_period = None
# Options to disable HS
self.hs_disabled = config.get("hs_disabled", False)
self.hs_disabled_message = config.get("hs_disabled_message", "")
@ -325,7 +338,7 @@ class ServerConfig(Config):
(
"The metrics_port configuration option is deprecated in Synapse 0.31 "
"in favour of a listener. Please see "
"http://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst"
"http://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md"
" on how to configure the new listener."
)
)
@ -352,7 +365,7 @@ class ServerConfig(Config):
return any(l["tls"] for l in self.listeners)
def generate_config_section(
self, server_name, data_dir_path, open_private_ports, **kwargs
self, server_name, data_dir_path, open_private_ports, listeners, **kwargs
):
_, bind_port = parse_and_validate_server_name(server_name)
if bind_port is not None:
@ -366,11 +379,68 @@ class ServerConfig(Config):
# Bring DEFAULT_ROOM_VERSION into the local-scope for use in the
# default config string
default_room_version = DEFAULT_ROOM_VERSION
secure_listeners = []
unsecure_listeners = []
private_addresses = ["::1", "127.0.0.1"]
if listeners:
for listener in listeners:
if listener["tls"]:
secure_listeners.append(listener)
else:
# If we don't want open ports we need to bind the listeners
# to some address other than 0.0.0.0. Here we chose to use
# localhost.
# If the addresses are already bound we won't overwrite them
# however.
if not open_private_ports:
listener.setdefault("bind_addresses", private_addresses)
unsecure_http_binding = "port: %i\n tls: false" % (unsecure_port,)
if not open_private_ports:
unsecure_http_binding += (
"\n bind_addresses: ['::1', '127.0.0.1']"
unsecure_listeners.append(listener)
secure_http_bindings = indent(
yaml.dump(secure_listeners), " " * 10
).lstrip()
unsecure_http_bindings = indent(
yaml.dump(unsecure_listeners), " " * 10
).lstrip()
if not unsecure_listeners:
unsecure_http_bindings = (
"""- port: %(unsecure_port)s
tls: false
type: http
x_forwarded: true"""
% locals()
)
if not open_private_ports:
unsecure_http_bindings += (
"\n bind_addresses: ['::1', '127.0.0.1']"
)
unsecure_http_bindings += """
resources:
- names: [client, federation]
compress: false"""
if listeners:
# comment out this block
unsecure_http_bindings = "#" + re.sub(
"\n {10}",
lambda match: match.group(0) + "#",
unsecure_http_bindings,
)
if not secure_listeners:
secure_http_bindings = (
"""#- port: %(bind_port)s
# type: http
# tls: true
# resources:
# - names: [client, federation]"""
% locals()
)
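
When explicit listeners were supplied, the default unsecure listener block stays in the sample config but commented out: the re.sub inserts a `#` after every newline-plus-ten-spaces, i.e. at the start of each template line. Demonstrated in isolation:

import re

bindings = """- port: 8008
          tls: false
          type: http"""

commented = "#" + re.sub(
    "\n {10}",
    lambda match: match.group(0) + "#",
    bindings,
)
print(commented)
# #- port: 8008
#           #tls: false
#           #type: http
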
return (
@ -501,8 +571,8 @@ class ServerConfig(Config):
#
# type: the type of listener. Normally 'http', but other valid options are:
# 'manhole' (see docs/manhole.md),
# 'metrics' (see docs/metrics-howto.rst),
# 'replication' (see docs/workers.rst).
# 'metrics' (see docs/metrics-howto.md),
# 'replication' (see docs/workers.md).
#
# tls: set to true to enable TLS for this listener. Will use the TLS
# key/cert specified in tls_private_key_path / tls_certificate_path.
@ -537,12 +607,12 @@ class ServerConfig(Config):
#
# media: the media API (/_matrix/media).
#
# metrics: the metrics interface. See docs/metrics-howto.rst.
# metrics: the metrics interface. See docs/metrics-howto.md.
#
# openid: OpenID authentication.
#
# replication: the HTTP replication API (/_synapse/replication). See
# docs/workers.rst.
# docs/workers.md.
#
# static: static resources under synapse/static (/_matrix/static). (Mostly
# useful for 'fallback authentication'.)
@ -556,25 +626,15 @@ class ServerConfig(Config):
# will also need to give Synapse a TLS key and certificate: see the TLS section
# below.)
#
#- port: %(bind_port)s
# type: http
# tls: true
# resources:
# - names: [client, federation]
%(secure_http_bindings)s
# Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
# that unwraps TLS.
#
# If you plan to use a reverse proxy, please see
# https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
# https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
#
- %(unsecure_http_binding)s
type: http
x_forwarded: true
resources:
- names: [client, federation]
compress: false
%(unsecure_http_bindings)s
# example additional_resources:
#
@ -668,6 +728,13 @@ class ServerConfig(Config):
# Defaults to 'true'.
#
#allow_per_room_profiles: false
# How long to keep redacted events in unredacted form in the database. After
# this period redacted events get replaced with their redacted form in the DB.
#
# Defaults to `7d`. Set to `null` to disable.
#
redaction_retention_period: 7d
"""
% locals()
)

View file

@ -27,19 +27,16 @@ class StatsConfig(Config):
def read_config(self, config, **kwargs):
self.stats_enabled = True
self.stats_bucket_size = 86400
self.stats_bucket_size = 86400 * 1000
self.stats_retention = sys.maxsize
stats_config = config.get("stats", None)
if stats_config:
self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
self.stats_bucket_size = (
self.parse_duration(stats_config.get("bucket_size", "1d")) / 1000
self.stats_bucket_size = self.parse_duration(
stats_config.get("bucket_size", "1d")
)
self.stats_retention = (
self.parse_duration(
stats_config.get("retention", "%ds" % (sys.maxsize,))
)
/ 1000
self.stats_retention = self.parse_duration(
stats_config.get("retention", "%ds" % (sys.maxsize,))
)
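
The unit change here: parse_duration returns milliseconds, so dropping the old `/ 1000` keeps stats_bucket_size in ms, and the hardcoded default becomes `86400 * 1000` to match. Roughly what parse_duration does for the strings used in this file (a simplified re-implementation, not Synapse's):

def parse_duration(value):
    """Parse '30s' / '5m' / '1d' style durations into milliseconds."""
    units = {"s": 1000, "m": 60 * 1000, "h": 60 * 60 * 1000,
             "d": 24 * 60 * 60 * 1000}
    if value[-1] in units:
        return int(value[:-1]) * units[value[-1]]
    return int(value)  # bare numbers are taken as milliseconds

assert parse_duration("1d") == 86400 * 1000
assert parse_duration("30s") == 30000
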
def generate_config_section(self, config_dir_path, server_name, **kwargs):

View file

@ -110,8 +110,15 @@ class TlsConfig(Config):
# Support globs (*) in whitelist values
self.federation_certificate_verification_whitelist = []
for entry in fed_whitelist_entries:
try:
entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii"))
except UnicodeEncodeError:
raise ConfigError(
"IDNA domain names are not allowed in the "
"federation_certificate_verification_whitelist: %s" % (entry,)
)
# Convert globs to regex
entry_regex = glob_to_regex(entry)
self.federation_certificate_verification_whitelist.append(entry_regex)
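
The encode("ascii") round-trip is a cheap guard: a whitelist pattern that survives ASCII encoding is plain DNS, while an internationalized (IDNA) name raises UnicodeEncodeError and is rejected with a ConfigError. For example:

for entry in ["*.example.com", "münchen.example.com"]:
    try:
        entry.encode("ascii")
        print("accepted:", entry)
    except UnicodeEncodeError:
        print("rejected (IDNA not allowed):", entry)
# accepted: *.example.com
# rejected (IDNA not allowed): münchen.example.com
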
# List of custom certificate authorities for federation traffic validation
@ -239,12 +246,38 @@ class TlsConfig(Config):
self.tls_fingerprints.append({"sha256": sha256_fingerprint})
def generate_config_section(
self, config_dir_path, server_name, data_dir_path, **kwargs
self,
config_dir_path,
server_name,
data_dir_path,
tls_certificate_path,
tls_private_key_path,
acme_domain,
**kwargs
):
"""If the acme_domain is specified acme will be enabled.
If the TLS paths are not specified the default will be certs in the
config directory"""
base_key_name = os.path.join(config_dir_path, server_name)
tls_certificate_path = base_key_name + ".tls.crt"
tls_private_key_path = base_key_name + ".tls.key"
if bool(tls_certificate_path) != bool(tls_private_key_path):
raise ConfigError(
"Please specify both a cert path and a key path or neither."
)
tls_enabled = (
"" if tls_certificate_path and tls_private_key_path or acme_domain else "#"
)
if not tls_certificate_path:
tls_certificate_path = base_key_name + ".tls.crt"
if not tls_private_key_path:
tls_private_key_path = base_key_name + ".tls.key"
acme_enabled = bool(acme_domain)
acme_domain = "matrix.example.com"
default_acme_account_file = os.path.join(data_dir_path, "acme_account.key")
# this is to avoid the max line length. Sorrynotsorry
@ -269,11 +302,11 @@ class TlsConfig(Config):
# instance, if using certbot, use `fullchain.pem` as your certificate,
# not `cert.pem`).
#
#tls_certificate_path: "%(tls_certificate_path)s"
%(tls_enabled)stls_certificate_path: "%(tls_certificate_path)s"
# PEM-encoded private key for TLS
#
#tls_private_key_path: "%(tls_private_key_path)s"
%(tls_enabled)stls_private_key_path: "%(tls_private_key_path)s"
# Whether to verify TLS server certificates for outbound federation requests.
#
@ -340,10 +373,10 @@ class TlsConfig(Config):
# permission to listen on port 80.
#
acme:
# ACME support is disabled by default. Uncomment the following line
# (and tls_certificate_path and tls_private_key_path above) to enable it.
# ACME support is disabled by default. Set this to `true` and uncomment
# tls_certificate_path and tls_private_key_path above to enable it.
#
#enabled: true
enabled: %(acme_enabled)s
# Endpoint to use to request certificates. If you only want to test,
# use Let's Encrypt's staging url:
@ -354,17 +387,17 @@ class TlsConfig(Config):
# Port number to listen on for the HTTP-01 challenge. Change this if
# you are forwarding connections through Apache/Nginx/etc.
#
#port: 80
port: 80
# Local addresses to listen on for incoming connections.
# Again, you may want to change this if you are forwarding connections
# through Apache/Nginx/etc.
#
#bind_addresses: ['::', '0.0.0.0']
bind_addresses: ['::', '0.0.0.0']
# How many days remaining on a certificate before it is renewed.
#
#reprovision_threshold: 30
reprovision_threshold: 30
# The domain that the certificate should be for. Normally this
# should be the same as your Matrix domain (i.e., 'server_name'), but,
@ -378,7 +411,7 @@ class TlsConfig(Config):
#
# If not set, defaults to your 'server_name'.
#
#domain: matrix.example.com
domain: %(acme_domain)s
# file to use for the account key. This will be generated if it doesn't
# exist.

View file

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.python_dependencies import DependencyException, check_requirements
from ._base import Config, ConfigError
@ -32,6 +34,11 @@ class TracerConfig(Config):
if not self.opentracer_enabled:
return
try:
check_requirements("opentracing")
except DependencyException as e:
raise ConfigError(e.message)
# The tracer is enabled so sanitize the config
self.opentracer_whitelist = opentracing_config.get("homeserver_whitelist", [])

View file

@ -15,7 +15,6 @@
import logging
import idna
from service_identity import VerificationError
from service_identity.pyopenssl import verify_hostname, verify_ip_address
from zope.interface import implementer
@ -114,14 +113,20 @@ class ClientTLSOptionsFactory(object):
self._no_verify_ssl_context = self._no_verify_ssl.getContext()
self._no_verify_ssl_context.set_info_callback(self._context_info_cb)
def get_options(self, host):
def get_options(self, host: bytes):
# IPolicyForHTTPS.get_options takes bytes, but we want to compare
# against the str whitelist. The hostnames in the whitelist are already
# IDNA-encoded like the hosts will be here.
ascii_host = host.decode("ascii")
# Check if certificate verification has been enabled
should_verify = self._config.federation_verify_certificates
# Check if we've disabled certificate verification for this host
if should_verify:
for regex in self._config.federation_certificate_verification_whitelist:
if regex.match(host):
if regex.match(ascii_host):
should_verify = False
break
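The decode above matters because IPolicyForHTTPS.get_options receives a bytes hostname while the whitelist holds IDNA-encoded str patterns; a self-contained sketch with a hypothetical whitelist entry:

import re

# Hypothetical whitelist pattern, already IDNA-encoded like the config values.
whitelist = [re.compile(r".*\.example\.com")]

host = b"xn--bcher-kva.example.com"  # bytes from twisted; IDNA for "bücher.example.com"
ascii_host = host.decode("ascii")

should_verify = not any(regex.match(ascii_host) for regex in whitelist)
# should_verify is now False: the host matched, so verification is skipped.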
@ -162,7 +167,7 @@ class SSLClientConnectionCreator(object):
Replaces twisted.internet.ssl.ClientTLSOptions
"""
def __init__(self, hostname, ctx, verify_certs):
def __init__(self, hostname: bytes, ctx, verify_certs: bool):
self._ctx = ctx
self._verifier = ConnectionVerifier(hostname, verify_certs)
@ -190,21 +195,16 @@ class ConnectionVerifier(object):
# This code is based on twisted.internet.ssl.ClientTLSOptions.
def __init__(self, hostname, verify_certs):
def __init__(self, hostname: bytes, verify_certs):
self._verify_certs = verify_certs
if isIPAddress(hostname) or isIPv6Address(hostname):
self._hostnameBytes = hostname.encode("ascii")
_decoded = hostname.decode("ascii")
if isIPAddress(_decoded) or isIPv6Address(_decoded):
self._is_ip_address = True
else:
# twisted's ClientTLSOptions falls back to the stdlib impl here if
# idna is not installed, but points out that lacks support for
# IDNA2008 (http://bugs.python.org/issue17305).
#
# We can rely on having idna.
self._hostnameBytes = idna.encode(hostname)
self._is_ip_address = False
self._hostnameBytes = hostname
self._hostnameASCII = self._hostnameBytes.decode("ascii")
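Since the hostname now arrives as bytes, it must be decoded before twisted's address helpers, which expect str; a quick runnable check:

from twisted.internet.abstract import isIPAddress, isIPv6Address

hostname = b"matrix.example.com"  # illustrative
decoded = hostname.decode("ascii")
assert not (isIPAddress(decoded) or isIPv6Address(decoded))  # a DNS name
assert isIPAddress("10.1.2.3") and isIPv6Address("::1")      # literal addresses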
def verify_context_info_cb(self, ssl_connection, where):

View file

@ -83,7 +83,7 @@ def compute_content_hash(event_dict, hash_algorithm):
event_json_bytes = encode_canonical_json(event_dict)
hashed = hash_algorithm(event_json_bytes)
return (hashed.name, hashed.digest())
return hashed.name, hashed.digest()
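This is the first of many `return (a, b)` -> `return a, b` cleanups in this commit; the comma builds the tuple, so the parentheses are redundant and behaviour is unchanged:

def with_parens():
    return ("sha256", b"\x00")

def without_parens():
    return "sha256", b"\x00"

assert with_parens() == without_parens() == ("sha256", b"\x00")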
def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
@ -106,7 +106,7 @@ def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
event_dict.pop("unsigned", None)
event_json_bytes = encode_canonical_json(event_dict)
hashed = hash_algorithm(event_json_bytes)
return (hashed.name, hashed.digest())
return hashed.name, hashed.digest()
def compute_event_signature(event_dict, signature_name, signing_key):

View file

@ -18,7 +18,6 @@ import logging
from collections import defaultdict
import six
from six import raise_from
from six.moves import urllib
import attr
@ -30,7 +29,6 @@ from signedjson.key import (
from signedjson.sign import (
SignatureVerifyException,
encode_canonical_json,
sign_json,
signature_ids,
verify_signed_json,
)
@ -540,13 +538,7 @@ class BaseV2KeyFetcher(object):
verify_key=verify_key, valid_until_ts=key_data["expired_ts"]
)
# re-sign the json with our own key, so that it is ready if we are asked to
# give it out as a notary server
signed_key_json = sign_json(
response_json, self.config.server_name, self.config.signing_key[0]
)
signed_key_json_bytes = encode_canonical_json(signed_key_json)
key_json_bytes = encode_canonical_json(response_json)
yield make_deferred_yieldable(
defer.gatherResults(
@ -558,7 +550,7 @@ class BaseV2KeyFetcher(object):
from_server=from_server,
ts_now_ms=time_added_ms,
ts_expires_ms=ts_valid_until_ms,
key_json_bytes=signed_key_json_bytes,
key_json_bytes=key_json_bytes,
)
for key_id in verify_keys
],
@ -657,9 +649,10 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
},
)
except (NotRetryingDestination, RequestSendFailed) as e:
raise_from(KeyLookupError("Failed to connect to remote server"), e)
# these both have str() representations which we can't really improve upon
raise KeyLookupError(str(e))
except HttpResponseException as e:
raise_from(KeyLookupError("Remote server returned an error"), e)
raise KeyLookupError("Remote server returned an error: %s" % (e,))
keys = {}
added_keys = []
@ -821,9 +814,11 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
timeout=10000,
)
except (NotRetryingDestination, RequestSendFailed) as e:
raise_from(KeyLookupError("Failed to connect to remote server"), e)
# these both have str() representations which we can't really improve
# upon
raise KeyLookupError(str(e))
except HttpResponseException as e:
raise_from(KeyLookupError("Remote server returned an error"), e)
raise KeyLookupError("Remote server returned an error: %s" % (e,))
if response["server_name"] != server_name:
raise KeyLookupError(

View file

@ -637,11 +637,11 @@ def auth_types_for_event(event):
if event.type == EventTypes.Create:
return []
auth_types = []
auth_types.append((EventTypes.PowerLevels, ""))
auth_types.append((EventTypes.Member, event.sender))
auth_types.append((EventTypes.Create, ""))
auth_types = [
(EventTypes.PowerLevels, ""),
(EventTypes.Member, event.sender),
(EventTypes.Create, ""),
]
if event.type == EventTypes.Member:
membership = event.content["membership"]

View file

@ -355,7 +355,7 @@ class FederationClient(FederationBase):
auth_chain.sort(key=lambda e: e.depth)
return (pdus, auth_chain)
return pdus, auth_chain
except HttpResponseException as e:
if e.code == 400 or e.code == 404:
logger.info("Failed to use get_room_state_ids API, falling back")
@ -404,7 +404,7 @@ class FederationClient(FederationBase):
signed_auth.sort(key=lambda e: e.depth)
return (signed_pdus, signed_auth)
return signed_pdus, signed_auth
@defer.inlineCallbacks
def get_events_from_store_or_dest(self, destination, room_id, event_ids):
@ -429,7 +429,7 @@ class FederationClient(FederationBase):
missing_events.discard(k)
if not missing_events:
return (signed_events, failed_to_fetch)
return signed_events, failed_to_fetch
logger.debug(
"Fetching unknown state/auth events %s for room %s",
@ -465,7 +465,7 @@ class FederationClient(FederationBase):
# We removed all events we successfully fetched from `batch`
failed_to_fetch.update(batch)
return (signed_events, failed_to_fetch)
return signed_events, failed_to_fetch
@defer.inlineCallbacks
@log_function

View file

@ -43,6 +43,7 @@ from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.endpoint import parse_server_name
from synapse.logging.context import nested_logging_context
from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
from synapse.logging.utils import log_function
from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
@ -99,7 +100,7 @@ class FederationServer(FederationBase):
res = self._transaction_from_pdus(pdus).get_dict()
return (200, res)
return 200, res
@defer.inlineCallbacks
@log_function
@ -162,7 +163,7 @@ class FederationServer(FederationBase):
yield self.transaction_actions.set_response(
origin, transaction, 400, response
)
return (400, response)
return 400, response
received_pdus_counter.inc(len(transaction.pdus))
@ -264,7 +265,7 @@ class FederationServer(FederationBase):
logger.debug("Returning: %s", str(response))
yield self.transaction_actions.set_response(origin, transaction, 200, response)
return (200, response)
return 200, response
@defer.inlineCallbacks
def received_edu(self, origin, edu_type, content):
@ -297,7 +298,7 @@ class FederationServer(FederationBase):
event_id,
)
return (200, resp)
return 200, resp
@defer.inlineCallbacks
def on_state_ids_request(self, origin, room_id, event_id):
@ -314,7 +315,7 @@ class FederationServer(FederationBase):
state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
return (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids})
return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
@defer.inlineCallbacks
def _on_context_state_request_compute(self, room_id, event_id):
@ -344,15 +345,15 @@ class FederationServer(FederationBase):
pdu = yield self.handler.get_persisted_pdu(origin, event_id)
if pdu:
return (200, self._transaction_from_pdus([pdu]).get_dict())
return 200, self._transaction_from_pdus([pdu]).get_dict()
else:
return (404, "")
return 404, ""
@defer.inlineCallbacks
def on_query_request(self, query_type, args):
received_queries_counter.labels(query_type).inc()
resp = yield self.registry.on_query(query_type, args)
return (200, resp)
return 200, resp
@defer.inlineCallbacks
def on_make_join_request(self, origin, room_id, user_id, supported_versions):
@ -434,7 +435,7 @@ class FederationServer(FederationBase):
logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
yield self.handler.on_send_leave_request(origin, pdu)
return (200, {})
return 200, {}
@defer.inlineCallbacks
def on_event_auth(self, origin, room_id, event_id):
@ -445,7 +446,7 @@ class FederationServer(FederationBase):
time_now = self._clock.time_msec()
auth_pdus = yield self.handler.on_event_auth(event_id)
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
return (200, res)
return 200, res
@defer.inlineCallbacks
def on_query_auth_request(self, origin, content, room_id, event_id):
@ -498,7 +499,7 @@ class FederationServer(FederationBase):
"missing": ret.get("missing", []),
}
return (200, send_content)
return 200, send_content
@log_function
def on_query_client_keys(self, origin, content):
@ -507,6 +508,7 @@ class FederationServer(FederationBase):
def on_query_user_devices(self, origin, user_id):
return self.on_query_request("user_devices", user_id)
@trace
@defer.inlineCallbacks
@log_function
def on_claim_client_keys(self, origin, content):
@ -515,6 +517,7 @@ class FederationServer(FederationBase):
for device_id, algorithm in device_keys.items():
query.append((user_id, device_id, algorithm))
log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
results = yield self.store.claim_e2e_one_time_keys(query)
json_result = {}
@ -666,9 +669,9 @@ class FederationServer(FederationBase):
return ret
@defer.inlineCallbacks
def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
def on_exchange_third_party_invite_request(self, room_id, event_dict):
ret = yield self.handler.on_exchange_third_party_invite_request(
origin, room_id, event_dict
room_id, event_dict
)
return ret
@ -808,12 +811,13 @@ class FederationHandlerRegistry(object):
if not handler:
logger.warn("No handler registered for EDU type %s", edu_type)
try:
yield handler(origin, content)
except SynapseError as e:
logger.info("Failed to handle edu %r: %r", edu_type, e)
except Exception:
logger.exception("Failed to handle edu %r", edu_type)
with start_active_span_from_edu(content, "handle_edu"):
try:
yield handler(origin, content)
except SynapseError as e:
logger.info("Failed to handle edu %r: %r", edu_type, e)
except Exception:
logger.exception("Failed to handle edu %r", edu_type)
def on_query(self, query_type, args):
handler = self.query_handlers.get(query_type)

View file

@ -14,11 +14,20 @@
# limitations under the License.
import logging
from canonicaljson import json
from twisted.internet import defer
from synapse.api.errors import HttpResponseException
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Transaction
from synapse.logging.opentracing import (
extract_text_map,
set_tag,
start_active_span_follows_from,
tags,
whitelisted_homeserver,
)
from synapse.util.metrics import measure_func
logger = logging.getLogger(__name__)
@ -44,93 +53,115 @@ class TransactionManager(object):
@defer.inlineCallbacks
def send_new_transaction(self, destination, pending_pdus, pending_edus):
# Sort based on the order field
pending_pdus.sort(key=lambda t: t[1])
pdus = [x[0] for x in pending_pdus]
edus = pending_edus
# Make a transaction-sending opentracing span. This span follows on from
# all the edus in that transaction. This needs to be done since there is
# no active span here, so if the edus were not received by the remote the
# span would have no causality and it would be forgotten.
        # span_contexts is only populated from EDUs that actually carry a
        # context, and the tracing helpers used below are no-ops when
        # opentracing is disabled.
success = True
span_contexts = []
keep_destination = whitelisted_homeserver(destination)
logger.debug("TX [%s] _attempt_new_transaction", destination)
for edu in pending_edus:
context = edu.get_context()
if context:
span_contexts.append(extract_text_map(json.loads(context)))
if keep_destination:
edu.strip_context()
txn_id = str(self._next_txn_id)
with start_active_span_follows_from("send_transaction", span_contexts):
logger.debug(
"TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)",
destination,
txn_id,
len(pdus),
len(edus),
)
# Sort based on the order field
pending_pdus.sort(key=lambda t: t[1])
pdus = [x[0] for x in pending_pdus]
edus = pending_edus
transaction = Transaction.create_new(
origin_server_ts=int(self.clock.time_msec()),
transaction_id=txn_id,
origin=self._server_name,
destination=destination,
pdus=pdus,
edus=edus,
)
success = True
self._next_txn_id += 1
logger.debug("TX [%s] _attempt_new_transaction", destination)
logger.info(
"TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
destination,
txn_id,
transaction.transaction_id,
len(pdus),
len(edus),
)
txn_id = str(self._next_txn_id)
# Actually send the transaction
# FIXME (erikj): This is a bit of a hack to make the Pdu age
# keys work
def json_data_cb():
data = transaction.get_dict()
now = int(self.clock.time_msec())
if "pdus" in data:
for p in data["pdus"]:
if "age_ts" in p:
unsigned = p.setdefault("unsigned", {})
unsigned["age"] = now - int(p["age_ts"])
del p["age_ts"]
return data
try:
response = yield self._transport_layer.send_transaction(
transaction, json_data_cb
logger.debug(
"TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)",
destination,
txn_id,
len(pdus),
len(edus),
)
code = 200
except HttpResponseException as e:
code = e.code
response = e.response
if e.code in (401, 404, 429) or 500 <= e.code:
logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
raise e
transaction = Transaction.create_new(
origin_server_ts=int(self.clock.time_msec()),
transaction_id=txn_id,
origin=self._server_name,
destination=destination,
pdus=pdus,
edus=edus,
)
logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
self._next_txn_id += 1
if code == 200:
for e_id, r in response.get("pdus", {}).items():
if "error" in r:
logger.info(
"TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
destination,
txn_id,
transaction.transaction_id,
len(pdus),
len(edus),
)
# Actually send the transaction
# FIXME (erikj): This is a bit of a hack to make the Pdu age
# keys work
def json_data_cb():
data = transaction.get_dict()
now = int(self.clock.time_msec())
if "pdus" in data:
for p in data["pdus"]:
if "age_ts" in p:
unsigned = p.setdefault("unsigned", {})
unsigned["age"] = now - int(p["age_ts"])
del p["age_ts"]
return data
try:
response = yield self._transport_layer.send_transaction(
transaction, json_data_cb
)
code = 200
except HttpResponseException as e:
code = e.code
response = e.response
if e.code in (401, 404, 429) or 500 <= e.code:
logger.info(
"TX [%s] {%s} got %d response", destination, txn_id, code
)
raise e
logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
if code == 200:
for e_id, r in response.get("pdus", {}).items():
if "error" in r:
logger.warn(
"TX [%s] {%s} Remote returned error for %s: %s",
destination,
txn_id,
e_id,
r,
)
else:
for p in pdus:
logger.warn(
"TX [%s] {%s} Remote returned error for %s: %s",
"TX [%s] {%s} Failed to send event %s",
destination,
txn_id,
e_id,
r,
p.event_id,
)
else:
for p in pdus:
logger.warn(
"TX [%s] {%s} Failed to send event %s",
destination,
txn_id,
p.event_id,
)
success = False
success = False
return success
set_tag(tags.ERROR, not success)
return success
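For reference, a sketch of what start_active_span_follows_from amounts to in plain opentracing terms (an assumption based on its name and use here; the global no-op tracer accepts this too): the transaction span is linked to each EDU's extracted context with a FOLLOWS_FROM reference rather than CHILD_OF, since the sender does not own those spans.

import opentracing

span_contexts = []  # contexts extracted from the EDUs, as above

refs = [opentracing.follows_from(ctx) for ctx in span_contexts]
with opentracing.tracer.start_active_span("send_transaction", references=refs):
    pass  # build and send the Transaction here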

View file

@ -327,21 +327,37 @@ class TransportLayerClient(object):
include_all_networks=False,
third_party_instance_id=None,
):
path = _create_v1_path("/publicRooms")
if search_filter:
# this uses MSC2197 (Search Filtering over Federation)
path = _create_v1_path("/publicRooms")
args = {"include_all_networks": "true" if include_all_networks else "false"}
if third_party_instance_id:
args["third_party_instance_id"] = (third_party_instance_id,)
if limit:
args["limit"] = [str(limit)]
if since_token:
args["since"] = [since_token]
data = {"include_all_networks": "true" if include_all_networks else "false"}
if third_party_instance_id:
data["third_party_instance_id"] = third_party_instance_id
if limit:
data["limit"] = str(limit)
if since_token:
data["since"] = since_token
# TODO(erikj): Actually send the search_filter across federation.
data["filter"] = search_filter
response = yield self.client.get_json(
destination=remote_server, path=path, args=args, ignore_backoff=True
)
response = yield self.client.post_json(
destination=remote_server, path=path, data=data, ignore_backoff=True
)
else:
path = _create_v1_path("/publicRooms")
args = {"include_all_networks": "true" if include_all_networks else "false"}
if third_party_instance_id:
args["third_party_instance_id"] = (third_party_instance_id,)
if limit:
args["limit"] = [str(limit)]
if since_token:
args["since"] = [since_token]
response = yield self.client.get_json(
destination=remote_server, path=path, args=args, ignore_backoff=True
)
return response
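Concretely, the MSC2197 branch switches from GET with query arguments to a POST whose body carries the filter inline; an illustrative body:

# Example POST body for /_matrix/federation/v1/publicRooms (values illustrative).
data = {
    "include_all_networks": "false",
    "limit": "20",
    "since": "t123-456",
    "filter": {"generic_search_term": "synapse"},
}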

View file

@ -22,7 +22,6 @@ import re
from twisted.internet.defer import maybeDeferred
import synapse
import synapse.logging.opentracing as opentracing
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import (
@ -39,6 +38,12 @@ from synapse.http.servlet import (
parse_string_from_args,
)
from synapse.logging.context import run_in_background
from synapse.logging.opentracing import (
start_active_span,
start_active_span_from_request,
tags,
whitelisted_homeserver,
)
from synapse.types import ThirdPartyInstanceID, get_domain_from_id
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.versionstring import get_version_string
@ -288,19 +293,28 @@ class BaseFederationServlet(object):
logger.warn("authenticate_request failed: %s", e)
raise
# Start an opentracing span
with opentracing.start_active_span_from_context(
request.requestHeaders,
"incoming-federation-request",
tags={
"request_id": request.get_request_id(),
opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_SERVER,
opentracing.tags.HTTP_METHOD: request.get_method(),
opentracing.tags.HTTP_URL: request.get_redacted_uri(),
opentracing.tags.PEER_HOST_IPV6: request.getClientIP(),
"authenticated_entity": origin,
},
):
request_tags = {
"request_id": request.get_request_id(),
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_METHOD: request.get_method(),
tags.HTTP_URL: request.get_redacted_uri(),
tags.PEER_HOST_IPV6: request.getClientIP(),
"authenticated_entity": origin,
"servlet_name": request.request_metrics.name,
}
# Only accept the span context if the origin is authenticated
# and whitelisted
if origin and whitelisted_homeserver(origin):
scope = start_active_span_from_request(
request, "incoming-federation-request", tags=request_tags
)
else:
scope = start_active_span(
"incoming-federation-request", tags=request_tags
)
with scope:
if origin:
with ratelimiter.ratelimit(origin) as d:
await d
@ -328,7 +342,11 @@ class BaseFederationServlet(object):
continue
server.register_paths(
method, (pattern,), self._wrap(code), self.__class__.__name__
method,
(pattern,),
self._wrap(code),
self.__class__.__name__,
trace=False,
)
@ -557,7 +575,7 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
async def on_PUT(self, origin, content, query, room_id):
content = await self.handler.on_exchange_third_party_invite_request(
origin, room_id, content
room_id, content
)
return 200, content
@ -756,6 +774,42 @@ class PublicRoomList(BaseFederationServlet):
)
return 200, data
async def on_POST(self, origin, content, query):
# This implements MSC2197 (Search Filtering over Federation)
if not self.allow_access:
raise FederationDeniedError(origin)
limit = int(content.get("limit", 100))
since_token = content.get("since", None)
search_filter = content.get("filter", None)
include_all_networks = content.get("include_all_networks", False)
third_party_instance_id = content.get("third_party_instance_id", None)
if include_all_networks:
network_tuple = None
if third_party_instance_id is not None:
raise SynapseError(
400, "Can't use include_all_networks with an explicit network"
)
elif third_party_instance_id is None:
network_tuple = ThirdPartyInstanceID(None, None)
else:
network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
if search_filter is None:
logger.warning("Nonefilter")
data = await self.handler.get_local_public_room_list(
limit=limit,
since_token=since_token,
search_filter=search_filter,
network_tuple=network_tuple,
from_federation=True,
)
return 200, data
class FederationVersionServlet(BaseFederationServlet):
PATH = "/version"

View file

@ -38,6 +38,12 @@ class Edu(JsonEncodedObject):
internal_keys = ["origin", "destination"]
def get_context(self):
return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}")
def strip_context(self):
getattr(self, "content", {})["org.matrix.opentracing_context"] = "{}"
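A short round-trip illustration of the two helpers (hypothetical EDU content; the trace id is a made-up Jaeger-style carrier value):

from synapse.federation.units import Edu

edu = Edu(
    origin="hs1.example.com",
    destination="hs2.example.com",
    edu_type="m.typing",
    content={"org.matrix.opentracing_context": '{"uber-trace-id": "abc:def:0:1"}'},
)
assert edu.get_context() == '{"uber-trace-id": "abc:def:0:1"}'
edu.strip_context()  # blank the context before sending to non-whitelisted hosts
assert edu.get_context() == "{}"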
class Transaction(JsonEncodedObject):
""" A transaction is a list of Pdus and Edus to be sent to a remote home

View file

@ -45,6 +45,7 @@ class BaseHandler(object):
self.state_handler = hs.get_state_handler()
self.distributor = hs.get_distributor()
self.ratelimiter = hs.get_ratelimiter()
self.admin_redaction_ratelimiter = hs.get_admin_redaction_ratelimiter()
self.clock = hs.get_clock()
self.hs = hs
@ -53,7 +54,7 @@ class BaseHandler(object):
self.event_builder_factory = hs.get_event_builder_factory()
@defer.inlineCallbacks
def ratelimit(self, requester, update=True):
def ratelimit(self, requester, update=True, is_admin_redaction=False):
"""Ratelimits requests.
Args:
@ -62,6 +63,9 @@ class BaseHandler(object):
Set to False when doing multiple checks for one request (e.g.
to check up front if we would reject the request), and set to
True for the last call for a given request.
is_admin_redaction (bool): Whether this is a room admin/moderator
redacting an event. If so then we may apply different
ratelimits depending on config.
Raises:
LimitExceededError if the request should be ratelimited
@ -90,16 +94,33 @@ class BaseHandler(object):
messages_per_second = override.messages_per_second
burst_count = override.burst_count
else:
messages_per_second = self.hs.config.rc_message.per_second
burst_count = self.hs.config.rc_message.burst_count
# We default to different values if this is an admin redaction and
# the config is set
if is_admin_redaction and self.hs.config.rc_admin_redaction:
messages_per_second = self.hs.config.rc_admin_redaction.per_second
burst_count = self.hs.config.rc_admin_redaction.burst_count
else:
messages_per_second = self.hs.config.rc_message.per_second
burst_count = self.hs.config.rc_message.burst_count
allowed, time_allowed = self.ratelimiter.can_do_action(
user_id,
time_now,
rate_hz=messages_per_second,
burst_count=burst_count,
update=update,
)
if is_admin_redaction and self.hs.config.rc_admin_redaction:
# If we have separate config for admin redactions we use a separate
# ratelimiter
allowed, time_allowed = self.admin_redaction_ratelimiter.can_do_action(
user_id,
time_now,
rate_hz=messages_per_second,
burst_count=burst_count,
update=update,
)
else:
allowed, time_allowed = self.ratelimiter.can_do_action(
user_id,
time_now,
rate_hz=messages_per_second,
burst_count=burst_count,
update=update,
)
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now))
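The effect of the branch above, sketched with illustrative numbers (the exact can_do_action signature is an assumption based on its use elsewhere in synapse):

from synapse.api.ratelimiting import Ratelimiter

message_limiter = Ratelimiter()
admin_redaction_limiter = Ratelimiter()

# A burst of admin redactions draws on its own bucket, leaving the sender's
# normal message budget in message_limiter untouched.
allowed, time_allowed = admin_redaction_limiter.can_do_action(
    "@mod:example.com", 1000.0, rate_hz=1.0, burst_count=50, update=True
)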

View file

@ -51,8 +51,8 @@ class AccountDataEventSource(object):
{"type": account_data_type, "content": content, "room_id": room_id}
)
return (results, current_stream_id)
return results, current_stream_id
@defer.inlineCallbacks
def get_pagination_rows(self, user, config, key):
return ([], config.to_id)
return [], config.to_id

View file

@ -38,6 +38,7 @@ logger = logging.getLogger(__name__)
class AccountValidityHandler(object):
def __init__(self, hs):
self.hs = hs
self.config = hs.config
self.store = self.hs.get_datastore()
self.sendmail = self.hs.get_sendmail()
self.clock = self.hs.get_clock()
@ -62,9 +63,14 @@ class AccountValidityHandler(object):
self._raw_from = email.utils.parseaddr(self._from_string)[1]
self._template_html, self._template_text = load_jinja2_templates(
config=self.hs.config,
template_html_name=self.hs.config.email_expiry_template_html,
template_text_name=self.hs.config.email_expiry_template_text,
self.config.email_template_dir,
[
self.config.email_expiry_template_html,
self.config.email_expiry_template_text,
],
apply_format_ts_filter=True,
apply_mxc_to_http_filter=True,
public_baseurl=self.config.public_baseurl,
)
# Check the renewal emails to send and send them every 30min.

View file

@ -94,6 +94,25 @@ class AdminHandler(BaseHandler):
return ret
def get_user_server_admin(self, user):
"""
Get the admin bit on a user.
Args:
            user (UserID): the (necessarily local) user to query
"""
return self.store.is_server_admin(user)
def set_user_server_admin(self, user, admin):
"""
Set the admin bit on a user.
Args:
            user (UserID): the (necessarily local) user to manipulate
admin (bool): whether or not the user should be an admin of this server
"""
return self.store.set_server_admin(user, admin)
@defer.inlineCallbacks
def export_user_data(self, user_id, writer):
"""Write all data we have on the user to the given writer.

View file

@ -294,12 +294,10 @@ class ApplicationServicesHandler(object):
# we don't know if they are unknown or not since it isn't one of our
# users. We can't poke ASes.
return False
return
user_info = yield self.store.get_user_by_id(user_id)
if user_info:
return False
return
# user not found; could be the AS though, so check.
services = self.store.get_app_services()

View file

@ -38,6 +38,7 @@ from synapse.api.errors import (
UserDeactivatedError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.config.emailconfig import ThreepidBehaviour
from synapse.logging.context import defer_to_thread
from synapse.module_api import ModuleApi
from synapse.types import UserID
@ -158,7 +159,7 @@ class AuthHandler(BaseHandler):
return params
@defer.inlineCallbacks
def check_auth(self, flows, clientdict, clientip, password_servlet=False):
def check_auth(self, flows, clientdict, clientip):
"""
Takes a dictionary sent by the client in the login / registration
protocol and handles the User-Interactive Auth flow.
@ -182,16 +183,6 @@ class AuthHandler(BaseHandler):
clientip (str): The IP address of the client.
password_servlet (bool): Whether the request originated from
PasswordRestServlet.
XXX: This is a temporary hack to distinguish between checking
for threepid validations locally (in the case of password
resets) and using the identity server (in the case of binding
a 3PID during registration). Once we start using the
homeserver for both tasks, this distinction will no longer be
necessary.
Returns:
defer.Deferred[dict, dict, str]: a deferred tuple of
(creds, params, session_id).
@ -247,9 +238,7 @@ class AuthHandler(BaseHandler):
if "type" in authdict:
login_type = authdict["type"]
try:
result = yield self._check_auth_dict(
authdict, clientip, password_servlet=password_servlet
)
result = yield self._check_auth_dict(authdict, clientip)
if result:
creds[login_type] = result
self._save_session(session)
@ -280,7 +269,7 @@ class AuthHandler(BaseHandler):
creds,
list(clientdict),
)
return (creds, clientdict, session["id"])
return creds, clientdict, session["id"]
ret = self._auth_dict_for_flows(flows, session)
ret["completed"] = list(creds)
@ -356,7 +345,7 @@ class AuthHandler(BaseHandler):
return sess.setdefault("serverdict", {}).get(key, default)
@defer.inlineCallbacks
def _check_auth_dict(self, authdict, clientip, password_servlet=False):
def _check_auth_dict(self, authdict, clientip):
"""Attempt to validate the auth dict provided by a client
Args:
@ -374,11 +363,7 @@ class AuthHandler(BaseHandler):
login_type = authdict["type"]
checker = self.checkers.get(login_type)
if checker is not None:
# XXX: Temporary workaround for having Synapse handle password resets
# See AuthHandler.check_auth for further details
res = yield checker(
authdict, clientip=clientip, password_servlet=password_servlet
)
res = yield checker(authdict, clientip=clientip)
return res
# build a v1-login-style dict out of the authdict and fall back to the
@ -449,7 +434,7 @@ class AuthHandler(BaseHandler):
return defer.succeed(True)
@defer.inlineCallbacks
def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs):
def _check_threepid(self, medium, authdict, **kwargs):
if "threepid_creds" not in authdict:
raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
@ -458,12 +443,18 @@ class AuthHandler(BaseHandler):
identity_handler = self.hs.get_handlers().identity_handler
logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,))
if (
not password_servlet
or self.hs.config.email_password_reset_behaviour == "remote"
):
threepid = yield identity_handler.threepid_from_creds(threepid_creds)
elif self.hs.config.email_password_reset_behaviour == "local":
if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
if medium == "email":
threepid = yield identity_handler.threepid_from_creds(
self.hs.config.account_threepid_delegate_email, threepid_creds
)
elif medium == "msisdn":
threepid = yield identity_handler.threepid_from_creds(
self.hs.config.account_threepid_delegate_msisdn, threepid_creds
)
else:
raise SynapseError(400, "Unrecognized threepid medium: %s" % (medium,))
elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
row = yield self.store.get_threepid_validation_session(
medium,
threepid_creds["client_secret"],
@ -722,7 +713,7 @@ class AuthHandler(BaseHandler):
known_login_type = True
is_valid = yield provider.check_password(qualified_user_id, password)
if is_valid:
return (qualified_user_id, None)
return qualified_user_id, None
if not hasattr(provider, "get_supported_login_types") or not hasattr(
provider, "check_auth"
@ -766,7 +757,7 @@ class AuthHandler(BaseHandler):
)
if canonical_user_id:
return (canonical_user_id, None)
return canonical_user_id, None
if not known_login_type:
raise SynapseError(400, "Unknown login type %s" % login_type)
@ -816,7 +807,7 @@ class AuthHandler(BaseHandler):
result = (result, None)
return result
return (None, None)
return None, None
@defer.inlineCallbacks
def _check_local_password(self, user_id, password):

View file

@ -25,6 +25,7 @@ from synapse.api.errors import (
HttpResponseException,
RequestSendFailed,
)
from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
@ -45,6 +46,7 @@ class DeviceWorkerHandler(BaseHandler):
self.state = hs.get_state_handler()
self._auth_handler = hs.get_auth_handler()
@trace
@defer.inlineCallbacks
def get_devices_by_user(self, user_id):
"""
@ -56,6 +58,7 @@ class DeviceWorkerHandler(BaseHandler):
defer.Deferred: list[dict[str, X]]: info on each device
"""
set_tag("user_id", user_id)
device_map = yield self.store.get_devices_by_user(user_id)
ips = yield self.store.get_last_client_ip_by_device(user_id, device_id=None)
@ -64,8 +67,10 @@ class DeviceWorkerHandler(BaseHandler):
for device in devices:
_update_device_from_client_ips(device, ips)
log_kv(device_map)
return devices
@trace
@defer.inlineCallbacks
def get_device(self, user_id, device_id):
""" Retrieve the given device
@ -85,9 +90,14 @@ class DeviceWorkerHandler(BaseHandler):
raise errors.NotFoundError
ips = yield self.store.get_last_client_ip_by_device(user_id, device_id)
_update_device_from_client_ips(device, ips)
set_tag("device", device)
set_tag("ips", ips)
return device
@measure_func("device.get_user_ids_changed")
@trace
@defer.inlineCallbacks
def get_user_ids_changed(self, user_id, from_token):
"""Get list of users that have had the devices updated, or have newly
@ -97,6 +107,9 @@ class DeviceWorkerHandler(BaseHandler):
user_id (str)
from_token (StreamToken)
"""
set_tag("user_id", user_id)
set_tag("from_token", from_token)
now_room_key = yield self.store.get_room_events_max_id()
room_ids = yield self.store.get_rooms_for_user(user_id)
@ -148,6 +161,9 @@ class DeviceWorkerHandler(BaseHandler):
# special-case for an empty prev state: include all members
# in the changed list
if not event_ids:
log_kv(
{"event": "encountered empty previous state", "room_id": room_id}
)
for key, event_id in iteritems(current_state_ids):
etype, state_key = key
if etype != EventTypes.Member:
@ -200,7 +216,11 @@ class DeviceWorkerHandler(BaseHandler):
possibly_joined = []
possibly_left = []
return {"changed": list(possibly_joined), "left": list(possibly_left)}
result = {"changed": list(possibly_joined), "left": list(possibly_left)}
log_kv(result)
return result
class DeviceHandler(DeviceWorkerHandler):
@ -267,6 +287,7 @@ class DeviceHandler(DeviceWorkerHandler):
raise errors.StoreError(500, "Couldn't generate a device ID.")
@trace
@defer.inlineCallbacks
def delete_device(self, user_id, device_id):
""" Delete the given device
@ -284,6 +305,10 @@ class DeviceHandler(DeviceWorkerHandler):
except errors.StoreError as e:
if e.code == 404:
# no match
set_tag("error", True)
log_kv(
{"reason": "User doesn't have device id.", "device_id": device_id}
)
pass
else:
raise
@ -296,6 +321,7 @@ class DeviceHandler(DeviceWorkerHandler):
yield self.notify_device_update(user_id, [device_id])
@trace
@defer.inlineCallbacks
def delete_all_devices_for_user(self, user_id, except_device_id=None):
"""Delete all of the user's devices
@ -331,6 +357,8 @@ class DeviceHandler(DeviceWorkerHandler):
except errors.StoreError as e:
if e.code == 404:
# no match
set_tag("error", True)
set_tag("reason", "User doesn't have that device id.")
pass
else:
raise
@ -371,6 +399,7 @@ class DeviceHandler(DeviceWorkerHandler):
else:
raise
@trace
@measure_func("notify_device_update")
@defer.inlineCallbacks
def notify_device_update(self, user_id, device_ids):
@ -386,6 +415,8 @@ class DeviceHandler(DeviceWorkerHandler):
hosts.update(get_domain_from_id(u) for u in users_who_share_room)
hosts.discard(self.server_name)
set_tag("target_hosts", hosts)
position = yield self.store.add_device_change_to_streams(
user_id, device_ids, list(hosts)
)
@ -405,6 +436,7 @@ class DeviceHandler(DeviceWorkerHandler):
)
for host in hosts:
self.federation_sender.send_device_messages(host)
log_kv({"message": "sent device update to host", "host": host})
@defer.inlineCallbacks
def on_federation_query_user_devices(self, user_id):
@ -451,12 +483,15 @@ class DeviceListUpdater(object):
iterable=True,
)
@trace
@defer.inlineCallbacks
def incoming_device_list_update(self, origin, edu_content):
"""Called on incoming device list update from federation. Responsible
for parsing the EDU and adding to pending updates list.
"""
set_tag("origin", origin)
set_tag("edu_content", edu_content)
user_id = edu_content.pop("user_id")
device_id = edu_content.pop("device_id")
stream_id = str(edu_content.pop("stream_id")) # They may come as ints
@ -471,12 +506,30 @@ class DeviceListUpdater(object):
device_id,
origin,
)
set_tag("error", True)
log_kv(
{
"message": "Got a device list update edu from a user and "
"device which does not match the origin of the request.",
"user_id": user_id,
"device_id": device_id,
}
)
return
room_ids = yield self.store.get_rooms_for_user(user_id)
if not room_ids:
# We don't share any rooms with this user. Ignore update, as we
# probably won't get any further updates.
set_tag("error", True)
log_kv(
{
"message": "Got an update from a user for which "
"we don't share any rooms",
"other user_id": user_id,
}
)
logger.warning(
"Got device list update edu for %r/%r, but don't share a room",
user_id,
@ -578,6 +631,7 @@ class DeviceListUpdater(object):
request:
https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
"""
log_kv({"message": "Doing resync to update device list."})
# Fetch all devices for the user.
origin = get_domain_from_id(user_id)
try:
@ -594,13 +648,20 @@ class DeviceListUpdater(object):
# eventually become consistent.
return
except FederationDeniedError as e:
set_tag("error", True)
log_kv({"reason": "FederationDeniedError"})
logger.info(e)
return
except Exception:
except Exception as e:
# TODO: Remember that we are now out of sync and try again
# later
set_tag("error", True)
log_kv(
{"message": "Exception raised by federation request", "exception": e}
)
logger.exception("Failed to handle device list update for %s", user_id)
return
log_kv({"result": result})
stream_id = result["stream_id"]
devices = result["devices"]

View file

@ -15,9 +15,17 @@
import logging
from canonicaljson import json
from twisted.internet import defer
from synapse.api.errors import SynapseError
from synapse.logging.opentracing import (
get_active_span_text_map,
log_kv,
set_tag,
start_active_span,
)
from synapse.types import UserID, get_domain_from_id
from synapse.util.stringutils import random_string
@ -78,7 +86,8 @@ class DeviceMessageHandler(object):
@defer.inlineCallbacks
def send_device_message(self, sender_user_id, message_type, messages):
set_tag("number_of_messages", len(messages))
set_tag("sender", sender_user_id)
local_messages = {}
remote_messages = {}
for user_id, by_device in messages.items():
@ -100,15 +109,21 @@ class DeviceMessageHandler(object):
message_id = random_string(16)
context = get_active_span_text_map()
remote_edu_contents = {}
for destination, messages in remote_messages.items():
remote_edu_contents[destination] = {
"messages": messages,
"sender": sender_user_id,
"type": message_type,
"message_id": message_id,
}
with start_active_span("to_device_for_user"):
set_tag("destination", destination)
remote_edu_contents[destination] = {
"messages": messages,
"sender": sender_user_id,
"type": message_type,
"message_id": message_id,
"org.matrix.opentracing_context": json.dumps(context),
}
log_kv({"local_messages": local_messages})
stream_id = yield self.store.add_messages_to_device_inbox(
local_messages, remote_edu_contents
)
@ -117,6 +132,7 @@ class DeviceMessageHandler(object):
"to_device_key", stream_id, users=local_messages.keys()
)
log_kv({"remote_messages": remote_messages})
for destination in remote_messages.keys():
# Enqueue a new federation transaction to send the new
# device messages to each remote destination.

View file

@ -24,6 +24,7 @@ from twisted.internet import defer
from synapse.api.errors import CodeMessageException, SynapseError
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
from synapse.types import UserID, get_domain_from_id
from synapse.util import unwrapFirstError
from synapse.util.retryutils import NotRetryingDestination
@ -46,6 +47,7 @@ class E2eKeysHandler(object):
"client_keys", self.on_federation_query_client_keys
)
@trace
@defer.inlineCallbacks
def query_devices(self, query_body, timeout):
""" Handle a device key query from a client
@ -81,6 +83,9 @@ class E2eKeysHandler(object):
else:
remote_queries[user_id] = device_ids
set_tag("local_key_query", local_query)
set_tag("remote_key_query", remote_queries)
# First get local devices.
failures = {}
results = {}
@ -121,6 +126,7 @@ class E2eKeysHandler(object):
r[user_id] = remote_queries[user_id]
# Now fetch any devices that we don't have in our cache
@trace
@defer.inlineCallbacks
def do_remote_query(destination):
"""This is called when we are querying the device list of a user on
@ -185,6 +191,8 @@ class E2eKeysHandler(object):
except Exception as e:
failure = _exception_to_failure(e)
failures[destination] = failure
set_tag("error", True)
set_tag("reason", failure)
yield make_deferred_yieldable(
defer.gatherResults(
@ -198,6 +206,7 @@ class E2eKeysHandler(object):
return {"device_keys": results, "failures": failures}
@trace
@defer.inlineCallbacks
def query_local_devices(self, query):
"""Get E2E device keys for local users
@ -210,6 +219,7 @@ class E2eKeysHandler(object):
defer.Deferred: (resolves to dict[string, dict[string, dict]]):
map from user_id -> device_id -> device details
"""
set_tag("local_query", query)
local_query = []
result_dict = {}
@ -217,6 +227,14 @@ class E2eKeysHandler(object):
# we use UserID.from_string to catch invalid user ids
if not self.is_mine(UserID.from_string(user_id)):
logger.warning("Request for keys for non-local user %s", user_id)
log_kv(
{
"message": "Requested a local key for a user which"
" was not local to the homeserver",
"user_id": user_id,
}
)
set_tag("error", True)
raise SynapseError(400, "Not a user here")
if not device_ids:
@ -241,6 +259,7 @@ class E2eKeysHandler(object):
r["unsigned"]["device_display_name"] = display_name
result_dict[user_id][device_id] = r
log_kv(results)
return result_dict
@defer.inlineCallbacks
@ -251,6 +270,7 @@ class E2eKeysHandler(object):
res = yield self.query_local_devices(device_keys_query)
return {"device_keys": res}
@trace
@defer.inlineCallbacks
def claim_one_time_keys(self, query, timeout):
local_query = []
@ -265,6 +285,9 @@ class E2eKeysHandler(object):
domain = get_domain_from_id(user_id)
remote_queries.setdefault(domain, {})[user_id] = device_keys
set_tag("local_key_query", local_query)
set_tag("remote_key_query", remote_queries)
results = yield self.store.claim_e2e_one_time_keys(local_query)
json_result = {}
@ -276,8 +299,10 @@ class E2eKeysHandler(object):
key_id: json.loads(json_bytes)
}
@trace
@defer.inlineCallbacks
def claim_client_keys(destination):
set_tag("destination", destination)
device_keys = remote_queries[destination]
try:
remote_result = yield self.federation.claim_client_keys(
@ -290,6 +315,8 @@ class E2eKeysHandler(object):
except Exception as e:
failure = _exception_to_failure(e)
failures[destination] = failure
set_tag("error", True)
set_tag("reason", failure)
yield make_deferred_yieldable(
defer.gatherResults(
@ -313,9 +340,11 @@ class E2eKeysHandler(object):
),
)
log_kv({"one_time_keys": json_result, "failures": failures})
return {"one_time_keys": json_result, "failures": failures}
@defer.inlineCallbacks
@tag_args
def upload_keys_for_user(self, user_id, device_id, keys):
time_now = self.clock.time_msec()
@ -329,6 +358,13 @@ class E2eKeysHandler(object):
user_id,
time_now,
)
log_kv(
{
"message": "Updating device_keys for user.",
"user_id": user_id,
"device_id": device_id,
}
)
# TODO: Sign the JSON with the server key
changed = yield self.store.set_e2e_device_keys(
user_id, device_id, time_now, device_keys
@ -336,12 +372,24 @@ class E2eKeysHandler(object):
if changed:
# Only notify about device updates *if* the keys actually changed
yield self.device_handler.notify_device_update(user_id, [device_id])
else:
log_kv({"message": "Not updating device_keys for user", "user_id": user_id})
one_time_keys = keys.get("one_time_keys", None)
if one_time_keys:
log_kv(
{
"message": "Updating one_time_keys for device.",
"user_id": user_id,
"device_id": device_id,
}
)
yield self._upload_one_time_keys_for_user(
user_id, device_id, time_now, one_time_keys
)
else:
log_kv(
{"message": "Did not update one_time_keys", "reason": "no keys given"}
)
# the device should have been registered already, but it may have been
# deleted due to a race with a DELETE request. Or we may be using an
@ -352,6 +400,7 @@ class E2eKeysHandler(object):
result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
set_tag("one_time_key_counts", result)
return {"one_time_key_counts": result}
@defer.inlineCallbacks
@ -395,6 +444,7 @@ class E2eKeysHandler(object):
(algorithm, key_id, encode_canonical_json(key).decode("ascii"))
)
log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
yield self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys)

View file

@ -26,6 +26,7 @@ from synapse.api.errors import (
StoreError,
SynapseError,
)
from synapse.logging.opentracing import log_kv, trace
from synapse.util.async_helpers import Linearizer
logger = logging.getLogger(__name__)
@ -49,6 +50,7 @@ class E2eRoomKeysHandler(object):
# changed.
self._upload_linearizer = Linearizer("upload_room_keys_lock")
@trace
@defer.inlineCallbacks
def get_room_keys(self, user_id, version, room_id=None, session_id=None):
"""Bulk get the E2E room keys for a given backup, optionally filtered to a given
@ -84,8 +86,10 @@ class E2eRoomKeysHandler(object):
user_id, version, room_id, session_id
)
log_kv(results)
return results
@trace
@defer.inlineCallbacks
def delete_room_keys(self, user_id, version, room_id=None, session_id=None):
"""Bulk delete the E2E room keys for a given backup, optionally filtered to a given
@ -107,6 +111,7 @@ class E2eRoomKeysHandler(object):
with (yield self._upload_linearizer.queue(user_id)):
yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id)
@trace
@defer.inlineCallbacks
def upload_room_keys(self, user_id, version, room_keys):
"""Bulk upload a list of room keys into a given backup version, asserting
@ -186,7 +191,14 @@ class E2eRoomKeysHandler(object):
session_id(str): the session whose room_key we're setting
room_key(dict): the room_key being set
"""
log_kv(
{
"message": "Trying to upload room key",
"room_id": room_id,
"session_id": session_id,
"user_id": user_id,
}
)
# get the room_key for this particular row
current_room_key = None
try:
@ -195,14 +207,23 @@ class E2eRoomKeysHandler(object):
)
except StoreError as e:
if e.code == 404:
pass
log_kv(
{
"message": "Room key not found.",
"room_id": room_id,
"user_id": user_id,
}
)
else:
raise
if self._should_replace_room_key(current_room_key, room_key):
log_kv({"message": "Replacing room key."})
yield self.store.set_e2e_room_key(
user_id, version, room_id, session_id, room_key
)
else:
log_kv({"message": "Not replacing room_key."})
@staticmethod
def _should_replace_room_key(current_room_key, room_key):
@ -236,6 +257,7 @@ class E2eRoomKeysHandler(object):
return False
return True
@trace
@defer.inlineCallbacks
def create_version(self, user_id, version_info):
"""Create a new backup version. This automatically becomes the new
@ -294,6 +316,7 @@ class E2eRoomKeysHandler(object):
raise
return res
@trace
@defer.inlineCallbacks
def delete_version(self, user_id, version=None):
"""Deletes a given version of the user's e2e_room_keys backup
@ -314,6 +337,7 @@ class E2eRoomKeysHandler(object):
else:
raise
@trace
@defer.inlineCallbacks
def update_version(self, user_id, version, version_info):
"""Update the info about a given version of the user's backup

View file

@ -167,7 +167,6 @@ class EventHandler(BaseHandler):
if not event:
return None
return
users = yield self.store.get_users_in_room(event.room_id)
is_peeking = user.to_string() not in users

View file

@ -326,8 +326,9 @@ class FederationHandler(BaseHandler):
ours = yield self.store.get_state_groups_ids(room_id, seen)
# state_maps is a list of mappings from (type, state_key) to event_id
# type: list[dict[tuple[str, str], str]]
state_maps = list(ours.values())
state_maps = list(
ours.values()
) # type: list[dict[tuple[str, str], str]]
# we don't need this any more, let's delete it.
del ours
@ -1427,7 +1428,7 @@ class FederationHandler(BaseHandler):
assert event.user_id == user_id
assert event.state_key == user_id
assert event.room_id == room_id
return (origin, event, format_ver)
return origin, event, format_ver
@defer.inlineCallbacks
@log_function
@ -2529,12 +2530,17 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
@log_function
def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
def on_exchange_third_party_invite_request(self, room_id, event_dict):
"""Handle an exchange_third_party_invite request from a remote server
The remote server will call this when it wants to turn a 3pid invite
into a normal m.room.member invite.
Args:
room_id (str): The ID of the room.
event_dict (dict[str, Any]): Dictionary containing the event body.
Returns:
Deferred: resolves (to None)
"""

View file

@ -29,6 +29,7 @@ from synapse.api.errors import (
HttpResponseException,
SynapseError,
)
from synapse.util.stringutils import random_string
from ._base import BaseHandler
@ -41,86 +42,130 @@ class IdentityHandler(BaseHandler):
self.http_client = hs.get_simple_http_client()
self.federation_http_client = hs.get_http_client()
self.hs = hs
self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers)
        self.trust_any_id_server_just_for_testing_do_not_use = (
            hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
        )

    def _extract_items_from_creds_dict(self, creds):
"""
Retrieve entries from a "credentials" dictionary
Args:
creds (dict[str, str]): Dictionary of credentials that contain the following keys:
* client_secret|clientSecret: A unique secret str provided by the client
* id_server|idServer: the domain of the identity server to query
* id_access_token: The access token to authenticate to the identity
server with.
Returns:
tuple(str, str, str|None): A tuple containing the client_secret, the id_server,
and the id_access_token value if available.
"""
client_secret = creds.get("client_secret") or creds.get("clientSecret")
if not client_secret:
raise SynapseError(
400, "No client_secret in creds", errcode=Codes.MISSING_PARAM
)
id_server = creds.get("id_server") or creds.get("idServer")
if not id_server:
raise SynapseError(
400, "No id_server in creds", errcode=Codes.MISSING_PARAM
)
id_access_token = creds.get("id_access_token")
return client_secret, id_server, id_access_token
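For example, either key spelling is accepted, and id_access_token stays None when absent (hypothetical values):

creds = {"clientSecret": "s3cr3t", "id_server": "id.example.com"}
client_secret = creds.get("client_secret") or creds.get("clientSecret")
id_server = creds.get("id_server") or creds.get("idServer")
id_access_token = creds.get("id_access_token")
assert (client_secret, id_server, id_access_token) == ("s3cr3t", "id.example.com", None)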
@defer.inlineCallbacks
def threepid_from_creds(self, id_server, creds):
"""
Retrieve and validate a threepid identifier from a "credentials" dictionary against a
given identity server
Args:
id_server (str|None): The identity server to validate 3PIDs against. If None,
                we will attempt to extract the id_server from the creds dict
creds (dict[str, str]): Dictionary containing the following keys:
* id_server|idServer: An optional domain name of an identity server
* client_secret|clientSecret: A unique secret str provided by the client
* sid: The ID of the validation session
Returns:
Deferred[dict[str,str|int]|None]: A dictionary consisting of response params to
the /getValidated3pid endpoint of the Identity Service API, or None if the
threepid was not found
"""
client_secret = creds.get("client_secret") or creds.get("clientSecret")
if not client_secret:
raise SynapseError(
400, "Missing param client_secret in creds", errcode=Codes.MISSING_PARAM
)
session_id = creds.get("sid")
if not session_id:
raise SynapseError(
400, "Missing param session_id in creds", errcode=Codes.MISSING_PARAM
)
if not id_server:
# Attempt to get the id_server from the creds dict
id_server = creds.get("id_server") or creds.get("idServer")
if not id_server:
raise SynapseError(
400, "Missing param id_server in creds", errcode=Codes.MISSING_PARAM
)
query_params = {"sid": session_id, "client_secret": client_secret}
url = "https://%s%s" % (
id_server,
"/_matrix/identity/api/v1/3pid/getValidated3pid",
)
def _should_trust_id_server(self, id_server):
if id_server not in self.trusted_id_servers:
if self.trust_any_id_server_just_for_testing_do_not_use:
logger.warn(
"Trusting untrustworthy ID server %r even though it isn't"
" in the trusted id list for testing because"
" 'use_insecure_ssl_client_just_for_testing_do_not_use'"
" is set in the config",
id_server,
)
else:
return False
return True
data = yield self.http_client.get_json(url, query_params)
return data if "medium" in data else None
@defer.inlineCallbacks
def threepid_from_creds(self, creds):
if "id_server" in creds:
id_server = creds["id_server"]
elif "idServer" in creds:
id_server = creds["idServer"]
else:
raise SynapseError(400, "No id_server in creds")
def bind_threepid(self, creds, mxid, use_v2=True):
"""Bind a 3PID to an identity server
if "client_secret" in creds:
client_secret = creds["client_secret"]
elif "clientSecret" in creds:
client_secret = creds["clientSecret"]
else:
raise SynapseError(400, "No client_secret in creds")
Args:
creds (dict[str, str]): Dictionary of credentials that contain the following keys:
* client_secret|clientSecret: A unique secret str provided by the client
* id_server|idServer: the domain of the identity server to query
* id_access_token: The access token to authenticate to the identity
server with. Required if use_v2 is true
mxid (str): The MXID to bind the 3PID to
use_v2 (bool): Whether to use v2 Identity Service API endpoints
if not self._should_trust_id_server(id_server):
logger.warn(
"%s is not a trusted ID server: rejecting 3pid " + "credentials",
id_server,
)
return None
try:
data = yield self.http_client.get_json(
"https://%s%s"
% (id_server, "/_matrix/identity/api/v1/3pid/getValidated3pid"),
{"sid": creds["sid"], "client_secret": client_secret},
)
except HttpResponseException as e:
logger.info("getValidated3pid failed with Matrix error: %r", e)
raise e.to_synapse_error()
if "medium" in data:
return data
return None
@defer.inlineCallbacks
def bind_threepid(self, creds, mxid):
Returns:
Deferred[dict]: The response from the identity server
"""
logger.debug("binding threepid %r to %s", creds, mxid)
data = None
if "id_server" in creds:
id_server = creds["id_server"]
elif "idServer" in creds:
id_server = creds["idServer"]
else:
raise SynapseError(400, "No id_server in creds")
client_secret, id_server, id_access_token = self._extract_items_from_creds_dict(
creds
)
if "client_secret" in creds:
client_secret = creds["client_secret"]
elif "clientSecret" in creds:
client_secret = creds["clientSecret"]
sid = creds.get("sid")
if not sid:
raise SynapseError(
400, "No sid in three_pid_creds", errcode=Codes.MISSING_PARAM
)
# If an id_access_token is not supplied, force usage of v1
if id_access_token is None:
use_v2 = False
# Decide which API endpoint URLs to use
headers = {}
bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid}
if use_v2:
bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,)
headers["Authorization"] = create_id_access_token_header(id_access_token)
else:
raise SynapseError(400, "No client_secret in creds")
bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,)
try:
data = yield self.http_client.post_json_get_json(
"https://%s%s" % (id_server, "/_matrix/identity/api/v1/3pid/bind"),
{"sid": creds["sid"], "client_secret": client_secret, "mxid": mxid},
bind_url, bind_data, headers=headers
)
logger.debug("bound threepid %r to %s", creds, mxid)
@ -131,13 +176,23 @@ class IdentityHandler(BaseHandler):
address=data["address"],
id_server=id_server,
)
return data
except HttpResponseException as e:
if e.code != 404 or not use_v2:
logger.error("3PID bind failed with Matrix error: %r", e)
raise e.to_synapse_error()
except CodeMessageException as e:
data = json.loads(e.msg) # XXX WAT?
return data
return data
logger.info("Got 404 when POSTing JSON %s, falling back to v1 URL", bind_url)
return (yield self.bind_threepid(creds, mxid, use_v2=False))
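The endpoint selection in isolation (domain illustrative): v2 requires the bearer header built by create_id_access_token_header, and a 404 from the v2 URL triggers exactly one retry against v1 without it.

id_server = "id.example.com"  # illustrative
use_v2 = True  # forced to False above when no id_access_token was supplied

if use_v2:
    bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,)
else:
    bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,)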
@defer.inlineCallbacks
def try_unbind_threepid(self, mxid, threepid):
"""Removes a binding from an identity server
"""Attempt to remove a 3PID from an identity server, or if one is not provided, all
identity servers we're aware the binding is present on
Args:
mxid (str): Matrix user ID of binding to be removed
@ -188,6 +243,8 @@ class IdentityHandler(BaseHandler):
server doesn't support unbinding
"""
url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii")
content = {
"mxid": mxid,
"threepid": {"medium": threepid["medium"], "address": threepid["address"]},
@ -199,7 +256,7 @@ class IdentityHandler(BaseHandler):
auth_headers = self.federation_http_client.build_auth_headers(
destination=None,
method="POST",
url_bytes="/_matrix/identity/api/v1/3pid/unbind".encode("ascii"),
url_bytes=url_bytes,
content=content,
destination_is=id_server,
)
@ -226,28 +283,122 @@ class IdentityHandler(BaseHandler):
return changed
@defer.inlineCallbacks
def send_threepid_validation(
self,
email_address,
client_secret,
send_attempt,
send_email_func,
next_link=None,
):
"""Send a threepid validation email for password reset or
registration purposes
Args:
email_address (str): The user's email address
client_secret (str): The provided client secret
send_attempt (int): Which send attempt this is
send_email_func (func): A function that takes an email address, token,
client_secret and session_id, sends an email
and returns a Deferred.
next_link (str|None): The URL to redirect the user to after validation
Returns:
The new session_id upon success
Raises:
            SynapseError if an error occurred when sending the email
"""
# Check that this email/client_secret/send_attempt combo is new or
# greater than what we've seen previously
session = yield self.store.get_threepid_validation_session(
"email", client_secret, address=email_address, validated=False
)
# Check to see if a session already exists and that it is not yet
# marked as validated
if session and session.get("validated_at") is None:
session_id = session["session_id"]
last_send_attempt = session["last_send_attempt"]
# Check that the send_attempt is higher than previous attempts
if send_attempt <= last_send_attempt:
# If not, just return a success without sending an email
return session_id
else:
            # A non-validated session does not exist yet.
# Generate a session id
session_id = random_string(16)
# Generate a new validation token
token = random_string(32)
# Send the mail with the link containing the token, client_secret
# and session_id
try:
yield send_email_func(email_address, token, client_secret, session_id)
except Exception:
logger.exception(
"Error sending threepid validation email to %s", email_address
)
raise SynapseError(500, "An error was encountered when sending the email")
token_expires = (
self.hs.clock.time_msec() + self.hs.config.email_validation_token_lifetime
)
yield self.store.start_or_continue_validation_session(
"email",
email_address,
session_id,
client_secret,
send_attempt,
next_link,
token,
token_expires,
)
return session_id
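The send_attempt comparison above is what makes client retries idempotent: resubmitting the same email and client_secret with an unchanged send_attempt returns the existing session ID without sending another email, while a higher send_attempt forces a re-send. A rough illustration, called from an inlineCallbacks context (handler and mailer names hypothetical):

    # First request creates a session and sends mail
    sid1 = yield handler.send_threepid_validation("a@b.com", "secret", 1, send_email)
    # Retried request with the same send_attempt: same session, no second email
    sid2 = yield handler.send_threepid_validation("a@b.com", "secret", 1, send_email)
    assert sid1 == sid2
    # Bumping send_attempt re-sends the validation email for the same session
    yield handler.send_threepid_validation("a@b.com", "secret", 2, send_email)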
@defer.inlineCallbacks
def requestEmailToken(
self, id_server, email, client_secret, send_attempt, next_link=None
):
        """
        Request an external server send an email on our behalf for the purposes of threepid
        validation.

        Args:
            id_server (str): The identity server to proxy to
            email (str): The email to send the message to
            client_secret (str): The unique client_secret sent by the user
            send_attempt (int): Which attempt this is
            next_link: A link to redirect the user to once they submit the token

        Returns:
            The json response body from the server
"""
params = {
"email": email,
"client_secret": client_secret,
"send_attempt": send_attempt,
}
        if next_link:
            params["next_link"] = next_link
if self.hs.config.using_identity_server_from_trusted_list:
# Warn that a deprecated config option is in use
logger.warn(
'The config option "trust_identity_server_for_password_resets" '
'has been replaced by "account_threepid_delegate". '
"Please consult the sample config at docs/sample_config.yaml for "
"details and update your config file."
)
try:
data = yield self.http_client.post_json_get_json(
"https://%s%s"
% (id_server, "/_matrix/identity/api/v1/validate/email/requestToken"),
id_server + "/_matrix/identity/api/v1/validate/email/requestToken",
params,
)
return data
@ -257,28 +408,85 @@ class IdentityHandler(BaseHandler):
@defer.inlineCallbacks
def requestMsisdnToken(
self,
id_server,
country,
phone_number,
client_secret,
send_attempt,
next_link=None,
):
"""
Request an external server send an SMS message on our behalf for the purposes of
threepid validation.
Args:
id_server (str): The identity server to proxy to
country (str): The country code of the phone number
phone_number (str): The number to send the message to
            client_secret (str): The unique client_secret sent by the user
send_attempt (int): Which attempt this is
next_link: A link to redirect the user to once they submit the token
Returns:
The json response body from the server
"""
params = {
"country": country,
"phone_number": phone_number,
"client_secret": client_secret,
"send_attempt": send_attempt,
}
        if next_link:
            params["next_link"] = next_link
if self.hs.config.using_identity_server_from_trusted_list:
# Warn that a deprecated config option is in use
logger.warn(
'The config option "trust_identity_server_for_password_resets" '
'has been replaced by "account_threepid_delegate". '
"Please consult the sample config at docs/sample_config.yaml for "
"details and update your config file."
)
try:
data = yield self.http_client.post_json_get_json(
"https://%s%s"
% (id_server, "/_matrix/identity/api/v1/validate/msisdn/requestToken"),
id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken",
params,
)
return data
except HttpResponseException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e.to_synapse_error()
def create_id_access_token_header(id_access_token):
"""Create an Authorization header for passing to SimpleHttpClient as the header value
of an HTTP request.
Args:
id_access_token (str): An identity server access token.
    Returns:
        list[str]: The bearer token encased in a list.
    """
    # Prefix with Bearer
    bearer_token = "Bearer %s" % id_access_token
    # Check the token is ascii-encodable; this raises UnicodeEncodeError on
    # non-ascii input (the encoded result itself is discarded)
    bearer_token.encode("ascii")
    # Return as a list as that's how SimpleHttpClient takes header values
    return [bearer_token]
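In practice a caller builds its headers dict directly from this helper; for example (token value illustrative):

    headers = {"Authorization": create_id_access_token_header("abc123")}
    # headers == {"Authorization": ["Bearer abc123"]}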
class LookupAlgorithm:
"""
Supported hashing algorithms when performing a 3PID lookup.
SHA256 - Hashing an (address, medium, pepper) combo with sha256, then url-safe base64
encoding
NONE - Not performing any hashing. Simply sending an (address, medium) combo in plaintext
"""
SHA256 = "sha256"
NONE = "none"

View file

@ -449,8 +449,7 @@ class InitialSyncHandler(BaseHandler):
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
            return member_event.membership, member_event.event_id
except AuthError:
visibility = yield self.state_handler.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
@ -459,8 +458,7 @@ class InitialSyncHandler(BaseHandler):
visibility
and visibility.content["history_visibility"] == "world_readable"
):
                return Membership.JOIN, None
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)

View file

@ -24,7 +24,7 @@ from twisted.internet import defer
from twisted.internet.defer import succeed
from synapse import event_auth
from synapse.api.constants import EventTypes, Membership, RelationTypes, UserTypes
from synapse.api.errors import (
AuthError,
Codes,
@ -469,6 +469,9 @@ class EventCreationHandler(object):
u = yield self.store.get_user_by_id(user_id)
assert u is not None
if u["user_type"] in (UserTypes.SUPPORT, UserTypes.BOT):
# support and bot users are not required to consent
return
if u["appservice_id"] is not None:
# users registered by an appservice are exempt
return
@ -726,7 +729,27 @@ class EventCreationHandler(object):
assert not self.config.worker_app
if ratelimit:
# We check if this is a room admin redacting an event so that we
# can apply different ratelimiting. We do this by simply checking
# it's not a self-redaction (to avoid having to look up whether the
# user is actually admin or not).
is_admin_redaction = False
if event.type == EventTypes.Redaction:
original_event = yield self.store.get_event(
event.redacts,
check_redacted=False,
get_prev_content=False,
allow_rejected=False,
allow_none=True,
)
is_admin_redaction = (
original_event and event.sender != original_event.sender
)
yield self.base_handler.ratelimit(
requester, is_admin_redaction=is_admin_redaction
)
yield self.base_handler.maybe_kick_guest_users(event, context)
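The heuristic reduces to a single predicate: any redaction of somebody else's event goes on the admin ratelimit bucket, without ever consulting power levels. As a standalone sketch (function name hypothetical):

    def looks_like_admin_redaction(redaction_event, original_event):
        # Self-redactions, and redactions of events we failed to look up,
        # stay on the normal ratelimit bucket
        return bool(original_event) and redaction_event.sender != original_event.sender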

View file

@ -70,6 +70,7 @@ class PaginationHandler(object):
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.clock = hs.get_clock()
self._server_name = hs.hostname
self.pagination_lock = ReadWriteLock()
self._purges_in_progress_by_room = set()
@ -153,6 +154,22 @@ class PaginationHandler(object):
"""
return self._purges_by_id.get(purge_id)
async def purge_room(self, room_id):
"""Purge the given room from the database"""
with (await self.pagination_lock.write(room_id)):
# check we know about the room
await self.store.get_room_version(room_id)
# first check that we have no users in this room
joined = await defer.maybeDeferred(
self.store.is_host_joined, room_id, self._server_name
)
if joined:
raise SynapseError(400, "Users are still joined to this room")
await self.store.purge_room(room_id)
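purge_room mixes async/await with older Deferred-returning storage methods via defer.maybeDeferred, which normalises a call that may return either a plain value or a Deferred into something awaitable. A self-contained illustration (names hypothetical; run under a Twisted reactor):

    from twisted.internet import defer

    def maybe_sync(x):
        # Happens to return a plain value rather than a Deferred
        return x * 2

    async def demo():
        # maybeDeferred wraps the result in a Deferred, which is awaitable
        result = await defer.maybeDeferred(maybe_sync, 21)
        assert result == 42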
@defer.inlineCallbacks
def get_messages(
self,

View file

@ -255,7 +255,7 @@ class PresenceHandler(object):
self.unpersisted_users_changes = set()
if unpersisted:
logger.info("Persisting %d upersisted presence updates", len(unpersisted))
logger.info("Persisting %d unpersisted presence updates", len(unpersisted))
yield self.store.update_presence(
[self.user_to_current_state[user_id] for user_id in unpersisted]
)
@ -1032,7 +1032,7 @@ class PresenceEventSource(object):
#
# Hence this guard where we just return nothing so that the sync
# doesn't return. C.f. #5503.
            return [], max_token
presence = self.get_presence_handler()
stream_change_cache = self.store.presence_stream_cache
@ -1279,7 +1279,7 @@ def get_interested_parties(store, states):
# Always notify self
users_to_states.setdefault(state.user_id, []).append(state)
    return room_ids_to_states, users_to_states
@defer.inlineCallbacks

View file

@ -34,7 +34,7 @@ from ._base import BaseHandler
logger = logging.getLogger(__name__)
MAX_DISPLAYNAME_LEN = 256
MAX_AVATAR_URL_LEN = 1000

View file

@ -148,7 +148,7 @@ class ReceiptEventSource(object):
to_key = yield self.get_current_key()
if from_key == to_key:
            return [], to_key
events = yield self.store.get_linearized_receipts_for_rooms(
room_ids, from_key=from_key, to_key=to_key

View file

@ -24,13 +24,11 @@ from synapse.api.errors import (
AuthError,
Codes,
ConsentNotGivenError,
InvalidCaptchaError,
LimitExceededError,
RegistrationError,
SynapseError,
)
from synapse.config.server import is_threepid_reserved
from synapse.http.client import CaptchaServerHttpClient
from synapse.http.servlet import assert_params_in_dict
from synapse.replication.http.login import RegisterDeviceReplicationServlet
from synapse.replication.http.register import (
@ -39,7 +37,6 @@ from synapse.replication.http.register import (
)
from synapse.types import RoomAlias, RoomID, UserID, create_requester
from synapse.util.async_helpers import Linearizer
from synapse.util.threepids import check_3pid_allowed
from ._base import BaseHandler
@ -59,7 +56,6 @@ class RegistrationHandler(BaseHandler):
self._auth_handler = hs.get_auth_handler()
self.profile_handler = hs.get_profile_handler()
self.user_directory_handler = hs.get_user_directory_handler()
self.captcha_client = CaptchaServerHttpClient(hs)
self.identity_handler = self.hs.get_handlers().identity_handler
self.ratelimiter = hs.get_registration_ratelimiter()
@ -279,16 +275,12 @@ class RegistrationHandler(BaseHandler):
fake_requester = create_requester(user_id)
# try to create the room if we're the first real user on the server. Note
        # that an auto-generated support or bot user is not a real user and will never be
# the user to create the room
should_auto_create_rooms = False
        is_real_user = yield self.store.is_real_user(user_id)
        if self.hs.config.autocreate_auto_join_rooms and is_real_user:
            count = yield self.store.count_real_users()
should_auto_create_rooms = count == 1
for r in self.hs.config.auto_join_rooms:
logger.info("Auto-joining %s to %s", user_id, r)
@ -362,70 +354,6 @@ class RegistrationHandler(BaseHandler):
)
return user_id
@defer.inlineCallbacks
def check_recaptcha(self, ip, private_key, challenge, response):
"""
Checks a recaptcha is correct.
Used only by c/s api v1
"""
captcha_response = yield self._validate_captcha(
ip, private_key, challenge, response
)
if not captcha_response["valid"]:
logger.info(
"Invalid captcha entered from %s. Error: %s",
ip,
captcha_response["error_url"],
)
raise InvalidCaptchaError(error_url=captcha_response["error_url"])
else:
logger.info("Valid captcha entered from %s", ip)
@defer.inlineCallbacks
def register_email(self, threepidCreds):
"""
Registers emails with an identity server.
Used only by c/s api v1
"""
for c in threepidCreds:
logger.info(
"validating threepidcred sid %s on id server %s",
c["sid"],
c["idServer"],
)
try:
threepid = yield self.identity_handler.threepid_from_creds(c)
except Exception:
logger.exception("Couldn't validate 3pid")
raise RegistrationError(400, "Couldn't validate 3pid")
if not threepid:
raise RegistrationError(400, "Couldn't validate 3pid")
logger.info(
"got threepid with medium '%s' and address '%s'",
threepid["medium"],
threepid["address"],
)
if not check_3pid_allowed(self.hs, threepid["medium"], threepid["address"]):
raise RegistrationError(403, "Third party identifier is not allowed")
@defer.inlineCallbacks
def bind_emails(self, user_id, threepidCreds):
"""Links emails with a user ID and informs an identity server.
Used only by c/s api v1
"""
# Now we have a matrix ID, bind it to the threepids we were given
for c in threepidCreds:
# XXX: This should be a deferred list, shouldn't it?
yield self.identity_handler.bind_threepid(c, user_id)
def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None):
# don't allow people to register the server notices mxid
if self._server_notices_mxid is not None:
@ -463,45 +391,8 @@ class RegistrationHandler(BaseHandler):
self._next_generated_user_id += 1
return str(id)
@defer.inlineCallbacks
def _validate_captcha(self, ip_addr, private_key, challenge, response):
"""Validates the captcha provided.
Used only by c/s api v1
Returns:
dict: Containing 'valid'(bool) and 'error_url'(str) if invalid.
"""
response = yield self._submit_captcha(ip_addr, private_key, challenge, response)
# parse Google's response. Lovely format..
lines = response.split("\n")
json = {
"valid": lines[0] == "true",
"error_url": "http://www.recaptcha.net/recaptcha/api/challenge?"
+ "error=%s" % lines[1],
}
return json
@defer.inlineCallbacks
def _submit_captcha(self, ip_addr, private_key, challenge, response):
"""
Used only by c/s api v1
"""
data = yield self.captcha_client.post_urlencoded_get_raw(
"http://www.recaptcha.net:80/recaptcha/api/verify",
args={
"privatekey": private_key,
"remoteip": ip_addr,
"challenge": challenge,
"response": response,
},
)
return data
@defer.inlineCallbacks
def _join_user_to_room(self, requester, room_identifier):
room_id = None
room_member_handler = self.hs.get_room_member_handler()
if RoomID.is_valid(room_identifier):
room_id = room_identifier
@ -622,7 +513,7 @@ class RegistrationHandler(BaseHandler):
initial_display_name=initial_display_name,
is_guest=is_guest,
)
return (r["device_id"], r["access_token"])
return r["device_id"], r["access_token"]
valid_until_ms = None
if self.session_lifetime is not None:
@ -648,9 +539,7 @@ class RegistrationHandler(BaseHandler):
return (device_id, access_token)
@defer.inlineCallbacks
    def post_registration_actions(self, user_id, auth_result, access_token):
"""A user has completed registration
Args:
@ -659,18 +548,10 @@ class RegistrationHandler(BaseHandler):
registered user.
access_token (str|None): The access token of the newly logged in
device, or None if `inhibit_login` enabled.
"""
if self.hs.config.worker_app:
yield self._post_registration_client(
                user_id=user_id, auth_result=auth_result, access_token=access_token
)
return
@ -683,13 +564,11 @@ class RegistrationHandler(BaseHandler):
):
yield self.store.upsert_monthly_active_user(user_id)
            yield self._register_email_threepid(user_id, threepid, access_token)
if auth_result and LoginType.MSISDN in auth_result:
threepid = auth_result[LoginType.MSISDN]
            yield self._register_msisdn_threepid(user_id, threepid)
if auth_result and LoginType.TERMS in auth_result:
yield self._on_user_consented(user_id, self.hs.config.user_consent_version)
@ -708,14 +587,12 @@ class RegistrationHandler(BaseHandler):
yield self.post_consent_actions(user_id)
@defer.inlineCallbacks
    def _register_email_threepid(self, user_id, threepid, token):
"""Add an email address as a 3pid identifier
Also adds an email pusher for the email address, if configured in the
HS config
Must be called on master.
Args:
@ -723,8 +600,6 @@ class RegistrationHandler(BaseHandler):
threepid (object): m.login.email.identity auth response
token (str|None): access_token for the user, or None if not logged
in.
Returns:
defer.Deferred:
"""
@ -766,29 +641,15 @@ class RegistrationHandler(BaseHandler):
data={},
)
if bind_email:
logger.info("bind_email specified: binding")
logger.debug("Binding emails %s to %s" % (threepid, user_id))
yield self.identity_handler.bind_threepid(
threepid["threepid_creds"], user_id
)
else:
logger.info("bind_email not specified: not binding email")
@defer.inlineCallbacks
    def _register_msisdn_threepid(self, user_id, threepid):
"""Add a phone number as a 3pid identifier
Must be called on master.
Args:
user_id (str): id of user
threepid (object): m.login.msisdn auth response
Returns:
defer.Deferred:
"""
@ -804,12 +665,3 @@ class RegistrationHandler(BaseHandler):
yield self._auth_handler.add_threepid(
user_id, threepid["medium"], threepid["address"], threepid["validated_at"]
)
if bind_msisdn:
logger.info("bind_msisdn specified: binding")
logger.debug("Binding msisdn %s to %s", threepid, user_id)
yield self.identity_handler.bind_threepid(
threepid["threepid_creds"], user_id
)
else:
logger.info("bind_msisdn not specified: not binding msisdn")

View file

@ -560,6 +560,18 @@ class RoomCreationHandler(BaseHandler):
yield self.event_creation_handler.assert_accepted_privacy_policy(requester)
power_level_content_override = config.get("power_level_content_override")
if (
power_level_content_override
and "users" in power_level_content_override
and user_id not in power_level_content_override["users"]
):
raise SynapseError(
400,
"Not a valid power_level_content_override: 'users' did not contain %s"
% (user_id,),
)
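For illustration, a room-creation config like the following would now be rejected up front, because the creator's own MXID is missing from the override's "users" map and they would otherwise lock themselves out of their own room (identifiers hypothetical):

    # Creator is @alice:example.com; the override omits her, so creation fails
    config = {
        "power_level_content_override": {"users": {"@bob:example.com": 100}}
    }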
invite_3pid_list = config.get("invite_3pid", [])
visibility = config.get("visibility", None)
@ -567,8 +579,8 @@ class RoomCreationHandler(BaseHandler):
        room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
        directory_handler = self.hs.get_handlers().directory_handler
        if room_alias:
yield directory_handler.create_association(
requester=requester,
room_id=room_id,
@ -604,7 +616,7 @@ class RoomCreationHandler(BaseHandler):
initial_state=initial_state,
creation_content=creation_content,
room_alias=room_alias,
            power_level_content_override=power_level_content_override,
creator_join_profile=creator_join_profile,
)
@ -653,6 +665,7 @@ class RoomCreationHandler(BaseHandler):
for invite_3pid in invite_3pid_list:
id_server = invite_3pid["id_server"]
id_access_token = invite_3pid.get("id_access_token") # optional
address = invite_3pid["address"]
medium = invite_3pid["medium"]
yield self.hs.get_room_member_handler().do_3pid_invite(
@ -663,6 +676,7 @@ class RoomCreationHandler(BaseHandler):
id_server,
requester,
txn_id=None,
id_access_token=id_access_token,
)
result = {"room_id": room_id}
@ -840,7 +854,6 @@ class RoomContextHandler(object):
)
if not event:
            return None
filtered = yield (filter_evts([event]))
if not filtered:

View file

@ -25,6 +25,7 @@ from unpaddedbase64 import decode_base64, encode_base64
from twisted.internet import defer
from synapse.api.constants import EventTypes, JoinRules
from synapse.api.errors import Codes, HttpResponseException
from synapse.types import ThirdPartyInstanceID
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.descriptors import cachedInlineCallbacks
@ -485,7 +486,33 @@ class RoomListHandler(BaseHandler):
return {"chunk": [], "total_room_count_estimate": 0}
if search_filter:
            # Searching across federation is defined in MSC2197.
# However, the remote homeserver may or may not actually support it.
# So we first try an MSC2197 remote-filtered search, then fall back
# to a locally-filtered search if we must.
try:
res = yield self._get_remote_list_cached(
server_name,
limit=limit,
since_token=since_token,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
search_filter=search_filter,
)
return res
except HttpResponseException as hre:
syn_err = hre.to_synapse_error()
if hre.code in (404, 405) or syn_err.errcode in (
Codes.UNRECOGNIZED,
Codes.NOT_FOUND,
):
logger.debug("Falling back to locally-filtered /publicRooms")
else:
raise # Not an error that should trigger a fallback.
# if we reach this point, then we fall back to the situation where
# we currently don't support searching across federation, so we have
# to do it manually without pagination
limit = None
since_token = None

View file

@ -29,9 +29,11 @@ from twisted.internet import defer
from synapse import types
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError
from synapse.handlers.identity import LookupAlgorithm, create_id_access_token_header
from synapse.types import RoomID, UserID
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_joined_room, user_left_room
from synapse.util.hash import sha256_and_url_safe_base64
from ._base import BaseHandler
@ -100,7 +102,7 @@ class RoomMemberHandler(object):
raise NotImplementedError()
@abc.abstractmethod
    def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
"""Attempt to reject an invite for a room this server is not in. If we
fail to do so we locally mark the invite as rejected.
@ -510,9 +512,7 @@ class RoomMemberHandler(object):
return res
@defer.inlineCallbacks
    def send_membership_event(self, requester, event, context, ratelimit=True):
"""
Change the membership status of a user in a room.
@ -522,16 +522,10 @@ class RoomMemberHandler(object):
act as the sender, will be skipped.
event (SynapseEvent): The membership event.
context: The context of the event.
ratelimit (bool): Whether to rate limit this request.
Raises:
SynapseError if there was a problem changing the membership.
"""
target_user = UserID.from_string(event.state_key)
room_id = event.room_id
@ -634,7 +628,7 @@ class RoomMemberHandler(object):
servers.remove(room_alias.domain)
servers.insert(0, room_alias.domain)
        return RoomID.from_string(room_id), servers
@defer.inlineCallbacks
def _get_inviter(self, user_id, room_id):
@ -646,7 +640,15 @@ class RoomMemberHandler(object):
@defer.inlineCallbacks
def do_3pid_invite(
self,
room_id,
inviter,
medium,
address,
id_server,
requester,
txn_id,
id_access_token=None,
):
if self.config.block_non_admin_invites:
is_requester_admin = yield self.auth.is_server_admin(requester.user)
@ -669,7 +671,12 @@ class RoomMemberHandler(object):
Codes.FORBIDDEN,
)
if not self._enable_lookup:
raise SynapseError(
403, "Looking up third-party identifiers is denied from this server"
)
invitee = yield self._lookup_3pid(id_server, medium, address, id_access_token)
if invitee:
yield self.update_membership(
@ -677,13 +684,58 @@ class RoomMemberHandler(object):
)
else:
yield self._make_and_store_3pid_invite(
requester,
id_server,
medium,
address,
room_id,
inviter,
txn_id=txn_id,
id_access_token=id_access_token,
)
@defer.inlineCallbacks
    def _lookup_3pid(self, id_server, medium, address, id_access_token=None):
"""Looks up a 3pid in the passed identity server.
Args:
id_server (str): The server name (including port, if required)
of the identity server to use.
medium (str): The type of the third party identifier (e.g. "email").
address (str): The third party identifier (e.g. "foo@example.com").
id_access_token (str|None): The access token to authenticate to the identity
server with
Returns:
str|None: the matrix ID of the 3pid, or None if it is not recognized.
"""
if id_access_token is not None:
try:
results = yield self._lookup_3pid_v2(
id_server, id_access_token, medium, address
)
return results
except Exception as e:
                # Catch HttpResponseException for a non-200 response code
# Check if this identity server does not know about v2 lookups
if isinstance(e, HttpResponseException) and e.code == 404:
# This is an old identity server that does not yet support v2 lookups
logger.warning(
"Attempted v2 lookup on v1 identity server %s. Falling "
"back to v1",
id_server,
)
else:
logger.warning("Error when looking up hashing details: %s", e)
return None
return (yield self._lookup_3pid_v1(id_server, medium, address))
@defer.inlineCallbacks
def _lookup_3pid_v1(self, id_server, medium, address):
"""Looks up a 3pid in the passed identity server using v1 lookup.
Args:
id_server (str): The server name (including port, if required)
of the identity server to use.
@ -693,10 +745,6 @@ class RoomMemberHandler(object):
Returns:
str: the matrix ID of the 3pid, or None if it is not recognized.
"""
try:
data = yield self.simple_http_client.get_json(
"%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server),
@ -710,9 +758,116 @@ class RoomMemberHandler(object):
return data["mxid"]
except IOError as e:
logger.warn("Error from identity server lookup: %s" % (e,))
logger.warning("Error from v1 identity server lookup: %s" % (e,))
return None
@defer.inlineCallbacks
def _lookup_3pid_v2(self, id_server, id_access_token, medium, address):
"""Looks up a 3pid in the passed identity server using v2 lookup.
Args:
id_server (str): The server name (including port, if required)
of the identity server to use.
id_access_token (str): The access token to authenticate to the identity server with
medium (str): The type of the third party identifier (e.g. "email").
address (str): The third party identifier (e.g. "foo@example.com").
Returns:
Deferred[str|None]: the matrix ID of the 3pid, or None if it is not recognised.
"""
# Check what hashing details are supported by this identity server
hash_details = yield self.simple_http_client.get_json(
"%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server),
{"access_token": id_access_token},
)
if not isinstance(hash_details, dict):
logger.warning(
"Got non-dict object when checking hash details of %s%s: %s",
id_server_scheme,
id_server,
hash_details,
)
raise SynapseError(
400,
"Non-dict object from %s%s during v2 hash_details request: %s"
% (id_server_scheme, id_server, hash_details),
)
# Extract information from hash_details
supported_lookup_algorithms = hash_details.get("algorithms")
lookup_pepper = hash_details.get("lookup_pepper")
if (
not supported_lookup_algorithms
or not isinstance(supported_lookup_algorithms, list)
or not lookup_pepper
or not isinstance(lookup_pepper, str)
):
raise SynapseError(
400,
"Invalid hash details received from identity server %s%s: %s"
% (id_server_scheme, id_server, hash_details),
)
# Check if any of the supported lookup algorithms are present
if LookupAlgorithm.SHA256 in supported_lookup_algorithms:
# Perform a hashed lookup
lookup_algorithm = LookupAlgorithm.SHA256
# Hash address, medium and the pepper with sha256
to_hash = "%s %s %s" % (address, medium, lookup_pepper)
lookup_value = sha256_and_url_safe_base64(to_hash)
elif LookupAlgorithm.NONE in supported_lookup_algorithms:
# Perform a non-hashed lookup
lookup_algorithm = LookupAlgorithm.NONE
# Combine together plaintext address and medium
lookup_value = "%s %s" % (address, medium)
else:
logger.warning(
"None of the provided lookup algorithms of %s are supported: %s",
id_server,
supported_lookup_algorithms,
)
raise SynapseError(
400,
"Provided identity server does not support any v2 lookup "
"algorithms that this homeserver supports.",
)
# Authenticate with identity server given the access token from the client
headers = {"Authorization": create_id_access_token_header(id_access_token)}
try:
lookup_results = yield self.simple_http_client.post_json_get_json(
"%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server),
{
"addresses": [lookup_value],
"algorithm": lookup_algorithm,
"pepper": lookup_pepper,
},
headers=headers,
)
except Exception as e:
logger.warning("Error when performing a v2 3pid lookup: %s", e)
raise SynapseError(
500, "Unknown error occurred during identity server lookup"
)
# Check for a mapping from what we looked up to an MXID
if "mappings" not in lookup_results or not isinstance(
lookup_results["mappings"], dict
):
logger.warning("No results from 3pid lookup")
return None
# Return the MXID if it's available, or None otherwise
mxid = lookup_results["mappings"].get(lookup_value)
return mxid
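Putting the pieces together, a hashed v2 lookup for an email address amounts to the following (address and pepper illustrative; sha256_and_url_safe_base64 as sketched earlier):

    to_hash = "%s %s %s" % ("alice@example.com", "email", "matrixrocks")
    lookup_value = sha256_and_url_safe_base64(to_hash)
    request_body = {
        "addresses": [lookup_value],
        "algorithm": "sha256",
        "pepper": "matrixrocks",
    }
    # POST request_body to https://<id_server>/_matrix/identity/v2/lookup and,
    # if present, read mappings[lookup_value] out of the response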
@defer.inlineCallbacks
def _verify_any_signature(self, data, server_hostname):
if server_hostname not in data["signatures"]:
@ -737,7 +892,15 @@ class RoomMemberHandler(object):
@defer.inlineCallbacks
def _make_and_store_3pid_invite(
self,
requester,
id_server,
medium,
address,
room_id,
user,
txn_id,
id_access_token=None,
):
room_state = yield self.state_handler.get_current_state(room_id)
@ -786,6 +949,7 @@ class RoomMemberHandler(object):
room_name=room_name,
inviter_display_name=inviter_display_name,
inviter_avatar_url=inviter_avatar_url,
id_access_token=id_access_token,
)
)
@ -823,6 +987,7 @@ class RoomMemberHandler(object):
room_name,
inviter_display_name,
inviter_avatar_url,
id_access_token=None,
):
"""
Asks an identity server for a third party invite.
@ -842,6 +1007,8 @@ class RoomMemberHandler(object):
inviter_display_name (str): The current display name of the
inviter.
inviter_avatar_url (str): The URL of the inviter's avatar.
id_access_token (str|None): The access token to authenticate to the identity
server with
Returns:
A deferred tuple containing:
@ -852,12 +1019,6 @@ class RoomMemberHandler(object):
display_name (str): A user-friendly name to represent the invited
user.
"""
invite_config = {
"medium": medium,
"address": address,
@ -871,22 +1032,66 @@ class RoomMemberHandler(object):
"sender_avatar_url": inviter_avatar_url,
}
        # Add the identity service access token to the JSON body and use the v2
        # Identity Service endpoints if id_access_token is present
        data = None
        base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server)
        if id_access_token:
            key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % (
                id_server_scheme,
                id_server,
            )
            # Attempt a v2 lookup
url = base_url + "/v2/store-invite"
try:
data = yield self.simple_http_client.post_json_get_json(
url,
invite_config,
{"Authorization": create_id_access_token_header(id_access_token)},
)
except HttpResponseException as e:
if e.code != 404:
logger.info("Failed to POST %s with JSON: %s", url, e)
raise e
if data is None:
key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
id_server_scheme,
id_server,
)
url = base_url + "/api/v1/store-invite"
try:
data = yield self.simple_http_client.post_json_get_json(
url, invite_config
)
except HttpResponseException as e:
logger.warning(
"Error trying to call /store-invite on %s%s: %s",
id_server_scheme,
id_server,
e,
)
if data is None:
# Some identity servers may only support application/x-www-form-urlencoded
# types. This is especially true with old instances of Sydent, see
# https://github.com/matrix-org/sydent/pull/170
try:
data = yield self.simple_http_client.post_urlencoded_get_json(
url, invite_config
)
except HttpResponseException as e:
logger.warning(
"Error calling /store-invite on %s%s with fallback "
"encoding: %s",
id_server_scheme,
id_server,
e,
)
raise e
# TODO: Check for success
token = data["token"]
@ -894,8 +1099,7 @@ class RoomMemberHandler(object):
if "public_key" in data:
fallback_public_key = {
"public_key": data["public_key"],
"key_validity_url": "%s%s/_matrix/identity/api/v1/pubkey/isvalid"
% (id_server_scheme, id_server),
"key_validity_url": key_validity_url,
}
else:
fallback_public_key = public_keys[0]
@ -903,7 +1107,7 @@ class RoomMemberHandler(object):
if not public_keys:
public_keys.append(fallback_public_key)
display_name = data["display_name"]
        return token, public_keys, fallback_public_key, display_name
@defer.inlineCallbacks
def _is_host_in_room(self, current_state_ids):
@ -962,9 +1166,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
)
if complexity:
if complexity["v1"] > max_complexity:
return True
return False
return complexity["v1"] > max_complexity
return None
@defer.inlineCallbacks
@ -980,10 +1182,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
max_complexity = self.hs.config.limit_remote_rooms.complexity
complexity = yield self.store.get_room_complexity(room_id)
if complexity["v1"] > max_complexity:
return True
return False
return complexity["v1"] > max_complexity
@defer.inlineCallbacks
def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
@ -1062,7 +1261,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
# The 'except' clause is very broad, but we need to
# capture everything from DNS failures upwards
#
logger.warn("Failed to reject invite: %s", e)
logger.warning("Failed to reject invite: %s", e)
yield self.store.locally_reject_invite(target.to_string(), room_id)
return {}

View file

@ -14,15 +14,14 @@
# limitations under the License.
import logging
from collections import Counter
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.handlers.state_deltas import StateDeltasHandler
from synapse.metrics import event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
@ -62,11 +61,10 @@ class StatsHandler(StateDeltasHandler):
def notify_new_event(self):
"""Called when there may be more deltas to process
"""
        if not self.hs.config.stats_enabled or self._is_processing:
            return
@defer.inlineCallbacks
def process():
@ -75,39 +73,78 @@ class StatsHandler(StateDeltasHandler):
finally:
self._is_processing = False
self._is_processing = True
run_as_background_process("stats.notify_new_event", process)
@defer.inlineCallbacks
def _unsafe_process(self):
        # If self.pos is None then it means we haven't fetched it from the DB
if self.pos is None:
            self.pos = yield self.store.get_stats_positions()
# Loop round handling deltas until we're up to date
while True:
with Measure(self.clock, "stats_delta"):
deltas = yield self.store.get_current_state_deltas(self.pos)
if not deltas:
return
# Be sure to read the max stream_ordering *before* checking if there are any outstanding
# deltas, since there is otherwise a chance that we could miss updates which arrive
# after we check the deltas.
room_max_stream_ordering = yield self.store.get_room_max_stream_ordering()
if self.pos == room_max_stream_ordering:
break
logger.info("Handling %d state deltas", len(deltas))
yield self._handle_deltas(deltas)
deltas = yield self.store.get_current_state_deltas(self.pos)
self.pos = deltas[-1]["stream_id"]
yield self.store.update_stats_stream_pos(self.pos)
if deltas:
logger.debug("Handling %d state deltas", len(deltas))
room_deltas, user_deltas = yield self._handle_deltas(deltas)
event_processing_positions.labels("stats").set(self.pos)
max_pos = deltas[-1]["stream_id"]
else:
room_deltas = {}
user_deltas = {}
max_pos = room_max_stream_ordering
# Then count deltas for total_events and total_event_bytes.
room_count, user_count = yield self.store.get_changes_room_total_events_and_bytes(
self.pos, max_pos
)
for room_id, fields in room_count.items():
room_deltas.setdefault(room_id, {}).update(fields)
for user_id, fields in user_count.items():
user_deltas.setdefault(user_id, {}).update(fields)
logger.debug("room_deltas: %s", room_deltas)
logger.debug("user_deltas: %s", user_deltas)
# Always call this so that we update the stats position.
yield self.store.bulk_update_stats_delta(
self.clock.time_msec(),
updates={"room": room_deltas, "user": user_deltas},
stream_id=max_pos,
)
logger.debug("Handled room stats to %s -> %s", self.pos, max_pos)
event_processing_positions.labels("stats").set(max_pos)
self.pos = max_pos
@defer.inlineCallbacks
def _handle_deltas(self, deltas):
"""Called with the state deltas to process
Returns:
Deferred[tuple[dict[str, Counter], dict[str, counter]]]
Resovles to two dicts, the room deltas and the user deltas,
mapping from room/user ID to changes in the various fields.
"""
Called with the state deltas to process
"""
room_to_stats_deltas = {}
user_to_stats_deltas = {}
room_to_state_updates = {}
for delta in deltas:
typ = delta["type"]
state_key = delta["state_key"]
@ -115,11 +152,10 @@ class StatsHandler(StateDeltasHandler):
event_id = delta["event_id"]
stream_id = delta["stream_id"]
prev_event_id = delta["prev_event_id"]
stream_pos = delta["stream_id"]
logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
logger.debug("Handling: %r, %r %r, %s", room_id, typ, state_key, event_id)
            token = yield self.store.get_earliest_token_for_stats("room", room_id)
# If the earliest token to begin from is larger than our current
# stream ID, skip processing this delta.
@ -131,203 +167,132 @@ class StatsHandler(StateDeltasHandler):
continue
if event_id is None and prev_event_id is None:
logger.error(
"event ID is None and so is the previous event ID. stream_id: %s",
stream_id,
)
continue
event_content = {}
sender = None
if event_id is not None:
event = yield self.store.get_event(event_id, allow_none=True)
if event:
event_content = event.content or {}
sender = event.sender
            # All the values in this dict are deltas (RELATIVE changes)
            room_stats_delta = room_to_stats_deltas.setdefault(room_id, Counter())
room_state = room_to_state_updates.setdefault(room_id, {})
if prev_event_id is None:
# this state event doesn't overwrite another,
# so it is a new effective/current state event
room_stats_delta["current_state_events"] += 1
if typ == EventTypes.Member:
# we could use _get_key_change here but it's a bit inefficient
# given we're not testing for a specific result; might as well
# just grab the prev_membership and membership strings and
# compare them.
prev_event_content = {}
# We take None rather than leave as a previous membership
# in the absence of a previous event because we do not want to
# reduce the leave count when a new-to-the-room user joins.
prev_membership = None
if prev_event_id is not None:
prev_event = yield self.store.get_event(
prev_event_id, allow_none=True
)
if prev_event:
prev_event_content = prev_event.content
prev_membership = prev_event_content.get(
"membership", Membership.LEAVE
)
membership = event_content.get("membership", Membership.LEAVE)
                if prev_membership is None:
                    logger.debug("No previous membership for this user.")
                elif membership == prev_membership:
                    pass  # noop
                elif prev_membership == Membership.JOIN:
                    room_stats_delta["joined_members"] -= 1
                elif prev_membership == Membership.INVITE:
                    room_stats_delta["invited_members"] -= 1
                elif prev_membership == Membership.LEAVE:
                    room_stats_delta["left_members"] -= 1
                elif prev_membership == Membership.BAN:
                    room_stats_delta["banned_members"] -= 1
                else:
                    raise ValueError(
                        "%r is not a valid prev_membership" % (prev_membership,)
                    )
                if membership == prev_membership:
                    pass  # noop
                elif membership == Membership.JOIN:
                    room_stats_delta["joined_members"] += 1
                elif membership == Membership.INVITE:
                    room_stats_delta["invited_members"] += 1
                    if sender and self.is_mine_id(sender):
                        user_to_stats_deltas.setdefault(sender, Counter())[
                            "invites_sent"
                        ] += 1
                elif membership == Membership.LEAVE:
                    room_stats_delta["left_members"] += 1
                elif membership == Membership.BAN:
                    room_stats_delta["banned_members"] += 1
                else:
                    raise ValueError("%r is not a valid membership" % (membership,))
user_id = state_key
if self.is_mine_id(user_id):
                    # this accounts for transitions like leave → ban and so on.
                    has_changed_joinedness = (prev_membership == Membership.JOIN) != (
                        membership == Membership.JOIN
                    )
if has_changed_joinedness:
delta = +1 if membership == Membership.JOIN else -1
user_to_stats_deltas.setdefault(user_id, Counter())[
"joined_rooms"
] += delta
room_stats_delta["local_users_in_room"] += delta
elif typ == EventTypes.Create:
room_state["is_federatable"] = (
event_content.get("m.federate", True) is True
)
if sender and self.is_mine_id(sender):
user_to_stats_deltas.setdefault(sender, Counter())[
"rooms_created"
] += 1
elif typ == EventTypes.JoinRules:
room_state["join_rules"] = event_content.get("join_rule")
elif typ == EventTypes.RoomHistoryVisibility:
                room_state["history_visibility"] = event_content.get(
                    "history_visibility"
                )
            elif typ == EventTypes.Encryption:
                room_state["encryption"] = event_content.get("algorithm")
            elif typ == EventTypes.Name:
                room_state["name"] = event_content.get("name")
            elif typ == EventTypes.Topic:
                room_state["topic"] = event_content.get("topic")
            elif typ == EventTypes.RoomAvatar:
                room_state["avatar"] = event_content.get("url")
            elif typ == EventTypes.CanonicalAlias:
                room_state["canonical_alias"] = event_content.get("alias")
            elif typ == EventTypes.GuestAccess:
                room_state["guest_access"] = event_content.get("guest_access")
        for room_id, state in room_to_state_updates.items():
            yield self.store.update_room_state(room_id, state)

        return room_to_stats_deltas, user_to_stats_deltas
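The accumulated deltas are plain collections.Counter objects, so repeated membership changes within one batch sum naturally and untouched fields read as zero:

    from collections import Counter

    room_stats_delta = Counter()
    room_stats_delta["joined_members"] += 1  # a join
    room_stats_delta["joined_members"] += 1  # another join
    room_stats_delta["left_members"] -= 1    # reverses an earlier leave
    assert room_stats_delta["joined_members"] == 2
    assert room_stats_delta["banned_members"] == 0  # missing keys default to 0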

View file

@ -378,7 +378,7 @@ class SyncHandler(object):
event_copy = {k: v for (k, v) in iteritems(event) if k != "room_id"}
ephemeral_by_room.setdefault(room_id, []).append(event_copy)
        return now_token, ephemeral_by_room
@defer.inlineCallbacks
def _load_filtered_recents(
@ -578,7 +578,6 @@ class SyncHandler(object):
if not last_events:
            return None
last_event = last_events[-1]
state_ids = yield self.store.get_state_ids_for_event(
@ -786,9 +785,8 @@ class SyncHandler(object):
batch.events[0].event_id, state_filter=state_filter
)
else:
                # We can get here if the user has ignored the senders of all
                # the recent events.
state_at_timeline_start = yield self.get_state_at(
room_id, stream_position=now_token, state_filter=state_filter
)
@ -1333,7 +1331,7 @@ class SyncHandler(object):
)
if not tags_by_room:
logger.debug("no-oping sync")
            return [], [], [], []
ignored_account_data = yield self.store.get_global_account_data_by_type_for_user(
"m.ignored_user_list", user_id=user_id
@ -1643,7 +1641,7 @@ class SyncHandler(object):
)
room_entries.append(entry)
        return room_entries, invited, newly_joined_rooms, newly_left_rooms
@defer.inlineCallbacks
def _get_all_rooms(self, sync_result_builder, ignored_users):
@ -1717,7 +1715,7 @@ class SyncHandler(object):
)
)
        return room_entries, invited, []
@defer.inlineCallbacks
def _generate_room_entry(
@ -1771,20 +1769,9 @@ class SyncHandler(object):
newly_joined_room=newly_joined,
)
        # Note: `batch` can be both empty and limited here in the case where
        # `_load_filtered_recents` can't find any events the user should see
        # (e.g. due to having ignored the sender of the last 50 events).
if newly_joined:
# debug for https://github.com/matrix-org/synapse/issues/4422

View file

@ -319,4 +319,4 @@ class TypingNotificationEventSource(object):
return self.get_typing_handler()._latest_room_serial
def get_pagination_rows(self, user, pagination_config, key):
        return [], pagination_config.from_key

View file

@ -35,7 +35,7 @@ from twisted.internet.interfaces import (
)
from twisted.python.failure import Failure
from twisted.web._newclient import ResponseDone
from twisted.web.client import Agent, HTTPConnectionPool, readBody
from twisted.web.http import PotentialDataLoss
from twisted.web.http_headers import Headers
@ -46,6 +46,7 @@ from synapse.http import (
redact_uri,
)
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.util.async_helpers import timeout_deferred
from synapse.util.caches import CACHE_SIZE_FACTOR
@ -269,42 +270,56 @@ class SimpleHttpClient(object):
# log request but strip `access_token` (AS requests for example include this)
logger.info("Sending request %s %s", method, redact_uri(uri))
with start_active_span(
"outgoing-client-request",
tags={
tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
tags.HTTP_METHOD: method,
tags.HTTP_URL: uri,
},
finish_on_close=True,
):
try:
body_producer = None
if data is not None:
body_producer = QuieterFileBodyProducer(BytesIO(data))
request_deferred = treq.request(
method,
uri,
agent=self.agent,
data=body_producer,
headers=headers,
**self._extra_treq_args
)
request_deferred = timeout_deferred(
request_deferred,
60,
self.hs.get_reactor(),
cancelled_to_request_timed_out_error,
)
response = yield make_deferred_yieldable(request_deferred)
incoming_responses_counter.labels(method, response.code).inc()
logger.info(
"Received response to %s %s: %s",
method,
redact_uri(uri),
response.code,
)
return response
except Exception as e:
incoming_responses_counter.labels(method, "ERR").inc()
logger.info(
"Error sending request to %s %s: %s %s",
method,
redact_uri(uri),
type(e).__name__,
e.args[0],
)
set_tag(tags.ERROR, True)
set_tag("error_reason", e.args[0])
raise
@defer.inlineCallbacks
def post_urlencoded_get_json(self, uri, args={}, headers=None):
@ -599,38 +614,6 @@ def _readBodyToFile(response, stream, max_size):
return d
class CaptchaServerHttpClient(SimpleHttpClient):
"""
Separate HTTP client for talking to google's captcha servers
Only slightly special because accepts partial download responses
used only by c/s api v1
"""
@defer.inlineCallbacks
def post_urlencoded_get_raw(self, url, args={}):
query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True)
response = yield self.request(
"POST",
url,
data=query_bytes,
headers=Headers(
{
b"Content-Type": [b"application/x-www-form-urlencoded"],
b"User-Agent": [self.user_agent],
}
),
)
try:
body = yield make_deferred_yieldable(readBody(response))
return body
except PartialDownloadError as e:
# twisted dislikes google's response, no content length.
return e.response
def encode_urlencode_args(args):
return {k: encode_urlencode_arg(v) for k, v in args.items()}

View file

@ -14,21 +14,21 @@
# limitations under the License.
import logging
import urllib
import attr
from netaddr import AddrFormatError, IPAddress
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.interfaces import IStreamClientEndpoint
from twisted.web.client import Agent, HTTPConnectionPool
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IAgentEndpointFactory
from synapse.http.federation.srv_resolver import Server, SrvResolver
from synapse.http.federation.well_known_resolver import WellKnownResolver
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.util import Clock
logger = logging.getLogger(__name__)
@ -36,8 +36,9 @@ logger = logging.getLogger(__name__)
@implementer(IAgent)
class MatrixFederationAgent(object):
"""An Agent-like thing which provides a `request` method which will look up a matrix
server and send an HTTP request to it.
"""An Agent-like thing which provides a `request` method which correctly
handles resolving matrix server names when using matrix://. Handles standard
https URIs as normal.
Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.)
@ -51,9 +52,9 @@ class MatrixFederationAgent(object):
SRVResolver impl to use for looking up SRV records. None to use a default
implementation.
_well_known_cache (TTLCache|None):
TTLCache impl for storing cached well-known lookups. None to use a default
implementation.
_well_known_resolver (WellKnownResolver|None):
WellKnownResolver to use to perform well-known lookups. None to use a
default implementation.
"""
def __init__(
@ -61,49 +62,49 @@ class MatrixFederationAgent(object):
reactor,
tls_client_options_factory,
_srv_resolver=None,
_well_known_cache=None,
_well_known_resolver=None,
):
self._reactor = reactor
self._clock = Clock(reactor)
self._tls_client_options_factory = tls_client_options_factory
if _srv_resolver is None:
_srv_resolver = SrvResolver()
self._srv_resolver = _srv_resolver
self._pool = HTTPConnectionPool(reactor)
self._pool.retryAutomatically = False
self._pool.maxPersistentPerHost = 5
self._pool.cachedConnectionTimeout = 2 * 60
self._well_known_resolver = WellKnownResolver(
self._agent = Agent.usingEndpointFactory(
self._reactor,
agent=Agent(
self._reactor,
pool=self._pool,
contextFactory=tls_client_options_factory,
MatrixHostnameEndpointFactory(
reactor, tls_client_options_factory, _srv_resolver
),
well_known_cache=_well_known_cache,
pool=self._pool,
)
if _well_known_resolver is None:
_well_known_resolver = WellKnownResolver(
self._reactor,
agent=Agent(
self._reactor,
pool=self._pool,
contextFactory=tls_client_options_factory,
),
)
self._well_known_resolver = _well_known_resolver
@defer.inlineCallbacks
def request(self, method, uri, headers=None, bodyProducer=None):
"""
Args:
method (bytes): HTTP method: GET/POST/etc
uri (bytes): Absolute URI to be retrieved
headers (twisted.web.http_headers.Headers|None):
HTTP headers to send with the request, or None to
send no extra headers.
bodyProducer (twisted.web.iweb.IBodyProducer|None):
An object which can generate bytes to make up the
body of this request (for example, the properly encoded contents of
a file for a file upload). Or None if the request is to have
no body.
Returns:
Deferred[twisted.web.iweb.IResponse]:
fires when the header of the response has been received (regardless of the
@ -111,210 +112,207 @@ class MatrixFederationAgent(object):
response from being received (including problems that prevent the request
from being sent).
"""
parsed_uri = URI.fromBytes(uri, defaultPort=-1)
res = yield self._route_matrix_uri(parsed_uri)
# We use urlparse as that will set `port` to None if there is no
# explicit port.
parsed_uri = urllib.parse.urlparse(uri)
# set up the TLS connection params
# If this is a matrix:// URI check if the server has delegated matrix
# traffic using well-known delegation.
#
# XXX disabling TLS is really only supported here for the benefit of the
# unit tests. We should make the UTs cope with TLS rather than having to make
# the code support the unit tests.
if self._tls_client_options_factory is None:
tls_options = None
else:
tls_options = self._tls_client_options_factory.get_options(
res.tls_server_name.decode("ascii")
# We have to do this here and not in the endpoint as we need to rewrite
# the host header with the delegated server name.
delegated_server = None
if (
parsed_uri.scheme == b"matrix"
and not _is_ip_literal(parsed_uri.hostname)
and not parsed_uri.port
):
well_known_result = yield self._well_known_resolver.get_well_known(
parsed_uri.hostname
)
delegated_server = well_known_result.delegated_server
# make sure that the Host header is set correctly
if delegated_server:
# Ok, the server has delegated matrix traffic to somewhere else, so
# lets rewrite the URL to replace the server with the delegated
# server name.
uri = urllib.parse.urlunparse(
(
parsed_uri.scheme,
delegated_server,
parsed_uri.path,
parsed_uri.params,
parsed_uri.query,
parsed_uri.fragment,
)
)
parsed_uri = urllib.parse.urlparse(uri)
# We need to make sure the host header is set to the netloc of the
# server.
if headers is None:
headers = Headers()
else:
headers = headers.copy()
if not headers.hasHeader(b"host"):
headers.addRawHeader(b"host", res.host_header)
headers.addRawHeader(b"host", parsed_uri.netloc)
class EndpointFactory(object):
@staticmethod
def endpointForURI(_uri):
ep = LoggingHostnameEndpoint(
self._reactor, res.target_host, res.target_port
)
if tls_options is not None:
ep = wrapClientTLS(tls_options, ep)
return ep
agent = Agent.usingEndpointFactory(self._reactor, EndpointFactory(), self._pool)
res = yield make_deferred_yieldable(
agent.request(method, uri, headers, bodyProducer)
self._agent.request(method, uri, headers, bodyProducer)
)
return res
@defer.inlineCallbacks
def _route_matrix_uri(self, parsed_uri, lookup_well_known=True):
"""Helper for `request`: determine the routing for a Matrix URI
Args:
parsed_uri (twisted.web.client.URI): uri to route. Note that it should be
parsed with URI.fromBytes(uri, defaultPort=-1) to set the `port` to -1
if there is no explicit port given.
@implementer(IAgentEndpointFactory)
class MatrixHostnameEndpointFactory(object):
"""Factory for MatrixHostnameEndpoint for parsing to an Agent.
"""
lookup_well_known (bool): True if we should look up the .well-known file if
there is no SRV record.
def __init__(self, reactor, tls_client_options_factory, srv_resolver):
self._reactor = reactor
self._tls_client_options_factory = tls_client_options_factory
Returns:
Deferred[_RoutingResult]
"""
# check for an IP literal
try:
ip_address = IPAddress(parsed_uri.host.decode("ascii"))
except Exception:
# not an IP address
ip_address = None
if srv_resolver is None:
srv_resolver = SrvResolver()
if ip_address:
port = parsed_uri.port
if port == -1:
port = 8448
return _RoutingResult(
host_header=parsed_uri.netloc,
tls_server_name=parsed_uri.host,
target_host=parsed_uri.host,
target_port=port,
)
self._srv_resolver = srv_resolver
if parsed_uri.port != -1:
# there is an explicit port
return _RoutingResult(
host_header=parsed_uri.netloc,
tls_server_name=parsed_uri.host,
target_host=parsed_uri.host,
target_port=parsed_uri.port,
)
if lookup_well_known:
# try a .well-known lookup
well_known_result = yield self._well_known_resolver.get_well_known(
parsed_uri.host
)
well_known_server = well_known_result.delegated_server
if well_known_server:
# if we found a .well-known, start again, but don't do another
# .well-known lookup.
# parse the server name in the .well-known response into host/port.
# (This code is lifted from twisted.web.client.URI.fromBytes).
if b":" in well_known_server:
well_known_host, well_known_port = well_known_server.rsplit(b":", 1)
try:
well_known_port = int(well_known_port)
except ValueError:
# the part after the colon could not be parsed as an int
# - we assume it is an IPv6 literal with no port (the closing
# ']' stops it being parsed as an int)
well_known_host, well_known_port = well_known_server, -1
else:
well_known_host, well_known_port = well_known_server, -1
new_uri = URI(
scheme=parsed_uri.scheme,
netloc=well_known_server,
host=well_known_host,
port=well_known_port,
path=parsed_uri.path,
params=parsed_uri.params,
query=parsed_uri.query,
fragment=parsed_uri.fragment,
)
res = yield self._route_matrix_uri(new_uri, lookup_well_known=False)
return res
# try a SRV lookup
service_name = b"_matrix._tcp.%s" % (parsed_uri.host,)
server_list = yield self._srv_resolver.resolve_service(service_name)
if not server_list:
target_host = parsed_uri.host
port = 8448
logger.debug(
"No SRV record for %s, using %s:%i",
parsed_uri.host.decode("ascii"),
target_host.decode("ascii"),
port,
)
else:
target_host, port = pick_server_from_list(server_list)
logger.debug(
"Picked %s:%i from SRV records for %s",
target_host.decode("ascii"),
port,
parsed_uri.host.decode("ascii"),
)
return _RoutingResult(
host_header=parsed_uri.netloc,
tls_server_name=parsed_uri.host,
target_host=target_host,
target_port=port,
def endpointForURI(self, parsed_uri):
return MatrixHostnameEndpoint(
self._reactor,
self._tls_client_options_factory,
self._srv_resolver,
parsed_uri,
)
@implementer(IStreamClientEndpoint)
class LoggingHostnameEndpoint(object):
"""A wrapper for HostnameEndpint which logs when it connects"""
class MatrixHostnameEndpoint(object):
"""An endpoint that resolves matrix:// URLs using Matrix server name
resolution (i.e. via SRV). Does not check for well-known delegation.
def __init__(self, reactor, host, port, *args, **kwargs):
self.host = host
self.port = port
self.ep = HostnameEndpoint(reactor, host, port, *args, **kwargs)
Args:
reactor (IReactor)
tls_client_options_factory (ClientTLSOptionsFactory|None):
factory to use for fetching client tls options, or none to disable TLS.
srv_resolver (SrvResolver): The SRV resolver to use
parsed_uri (twisted.web.client.URI): The parsed URI that we want
to connect to.
"""
def __init__(self, reactor, tls_client_options_factory, srv_resolver, parsed_uri):
self._reactor = reactor
self._parsed_uri = parsed_uri
# set up the TLS connection params
#
# XXX disabling TLS is really only supported here for the benefit of the
# unit tests. We should make the UTs cope with TLS rather than having to make
# the code support the unit tests.
if tls_client_options_factory is None:
self._tls_options = None
else:
self._tls_options = tls_client_options_factory.get_options(
self._parsed_uri.host
)
self._srv_resolver = srv_resolver
def connect(self, protocol_factory):
logger.info("Connecting to %s:%i", self.host.decode("ascii"), self.port)
return self.ep.connect(protocol_factory)
"""Implements IStreamClientEndpoint interface
"""
return run_in_background(self._do_connect, protocol_factory)
@defer.inlineCallbacks
def _do_connect(self, protocol_factory):
first_exception = None
server_list = yield self._resolve_server()
for server in server_list:
host = server.host
port = server.port
try:
logger.info("Connecting to %s:%i", host.decode("ascii"), port)
endpoint = HostnameEndpoint(self._reactor, host, port)
if self._tls_options:
endpoint = wrapClientTLS(self._tls_options, endpoint)
result = yield make_deferred_yieldable(
endpoint.connect(protocol_factory)
)
return result
except Exception as e:
logger.info(
"Failed to connect to %s:%i: %s", host.decode("ascii"), port, e
)
if not first_exception:
first_exception = e
# We return the first failure because that's probably the most interesting.
if first_exception:
raise first_exception
# This shouldn't happen as we should always have at least one host/port
# to try and if that doesn't work then we'll have an exception.
raise Exception("Failed to resolve server %r" % (self._parsed_uri.netloc,))
@defer.inlineCallbacks
def _resolve_server(self):
"""Resolves the server name to a list of hosts and ports to attempt to
connect to.
Returns:
Deferred[list[Server]]
"""
if self._parsed_uri.scheme != b"matrix":
return [Server(host=self._parsed_uri.host, port=self._parsed_uri.port)]
# Note: We don't do well-known lookup as that needs to have happened
# before now, due to needing to rewrite the Host header of the HTTP
# request.
# We reparse the URI so that defaultPort is -1 rather than 80
parsed_uri = urllib.parse.urlparse(self._parsed_uri.toBytes())
host = parsed_uri.hostname
port = parsed_uri.port
# If there is an explicit port or the host is an IP address we bypass
# SRV lookups and just use the given host/port.
if port or _is_ip_literal(host):
return [Server(host, port or 8448)]
server_list = yield self._srv_resolver.resolve_service(b"_matrix._tcp." + host)
if server_list:
return server_list
# No SRV records, so we fallback to host and 8448
return [Server(host, 8448)]
@attr.s
class _RoutingResult(object):
"""The result returned by `_route_matrix_uri`.
def _is_ip_literal(host):
"""Test if the given host name is either an IPv4 or IPv6 literal.
Contains the parameters needed to direct a federation connection to a particular
server.
Args:
host (bytes)
Where a SRV record points to several servers, this object contains a single server
chosen from the list.
Returns:
bool
"""
host_header = attr.ib()
"""
The value we should assign to the Host header (host:port from the matrix
URI, or .well-known).
host = host.decode("ascii")
:type: bytes
"""
tls_server_name = attr.ib()
"""
The server name we should set in the SNI (typically host, without port, from the
matrix URI or .well-known)
:type: bytes
"""
target_host = attr.ib()
"""
The hostname (or IP literal) we should route the TCP connection to (the target of the
SRV record, or the hostname from the URL/.well-known)
:type: bytes
"""
target_port = attr.ib()
"""
The port we should route the TCP connection to (the target of the SRV record, or
the port from the URL/.well-known, or 8448)
:type: int
"""
try:
IPAddress(host)
return True
except AddrFormatError:
return False
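For illustration, a self-contained sketch of the IP-literal check above, using netaddr as imported at the top of this file:

    from netaddr import AddrFormatError, IPAddress

    def is_ip_literal(host: bytes) -> bool:
        # Hostnames arrive as bytes from the parsed URI.
        try:
            IPAddress(host.decode("ascii"))
            return True
        except AddrFormatError:
            return False

    assert is_ip_literal(b"192.168.1.1")     # IPv4 literal
    assert is_ip_literal(b"::1")             # IPv6 literal
    assert not is_ip_literal(b"matrix.org")  # hostname: SRV/.well-known apply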

View file

@ -32,7 +32,7 @@ logger = logging.getLogger(__name__)
SERVER_CACHE = {}
@attr.s
@attr.s(slots=True, frozen=True)
class Server(object):
"""
Our record of an individual server which can be tried to reach a destination.
@ -53,34 +53,47 @@ class Server(object):
expires = attr.ib(default=0)
def pick_server_from_list(server_list):
"""Randomly choose a server from the server list
Args:
server_list (list[Server]): list of candidate servers
Returns:
Tuple[bytes, int]: (host, port) pair for the chosen server
def _sort_server_list(server_list):
"""Given a list of SRV records sort them into priority order and shuffle
each priority with the given weight.
"""
if not server_list:
raise RuntimeError("pick_server_from_list called with empty list")
priority_map = {}
# TODO: currently we only use the lowest-priority servers. We should maintain a
# cache of servers known to be "down" and filter them out
for server in server_list:
priority_map.setdefault(server.priority, []).append(server)
min_priority = min(s.priority for s in server_list)
eligible_servers = list(s for s in server_list if s.priority == min_priority)
total_weight = sum(s.weight for s in eligible_servers)
target_weight = random.randint(0, total_weight)
results = []
for priority in sorted(priority_map):
servers = priority_map[priority]
for s in eligible_servers:
target_weight -= s.weight
# This algorithm roughly follows the one described in RFC 2782,
# changed to remove an off-by-one error.
#
# N.B. Weights can be zero, which means that they should be picked
# rarely.
if target_weight <= 0:
return s.host, s.port
total_weight = sum(s.weight for s in servers)
# this should be impossible.
raise RuntimeError("pick_server_from_list got to end of eligible server list.")
# Total weight can become zero if there are only zero weight servers
# left, which we handle by just shuffling and appending to the results.
while servers and total_weight:
target_weight = random.randint(1, total_weight)
for s in servers:
target_weight -= s.weight
if target_weight <= 0:
break
results.append(s)
servers.remove(s)
total_weight -= s.weight
if servers:
random.shuffle(servers)
results.extend(servers)
return results
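For illustration, here is the same weighted shuffle as a self-contained sketch, using a simplified namedtuple in place of the attrs-based Server class (hostnames are illustrative):

    import random
    from collections import namedtuple

    Server = namedtuple("Server", ["host", "port", "priority", "weight"])

    def sort_server_list(server_list):
        # Group by priority; lower priority values are tried first.
        priority_map = {}
        for server in server_list:
            priority_map.setdefault(server.priority, []).append(server)

        results = []
        for priority in sorted(priority_map):
            servers = priority_map[priority]
            total_weight = sum(s.weight for s in servers)
            while servers and total_weight:
                # Pick a server with probability proportional to its weight.
                target_weight = random.randint(1, total_weight)
                for s in servers:
                    target_weight -= s.weight
                    if target_weight <= 0:
                        break
                results.append(s)
                servers.remove(s)
                total_weight -= s.weight
            # Zero-weight stragglers are shuffled onto the end.
            if servers:
                random.shuffle(servers)
                results.extend(servers)
        return results

    servers = [
        Server(b"a.example.com", 8448, priority=10, weight=60),
        Server(b"b.example.com", 8448, priority=10, weight=40),
        Server(b"backup.example.com", 8448, priority=20, weight=0),
    ]
    # Priority-10 servers always precede priority-20; a.example.com
    # leads roughly 60% of the time.
    print(sort_server_list(servers))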
class SrvResolver(object):
@ -120,7 +133,7 @@ class SrvResolver(object):
if cache_entry:
if all(s.expires > now for s in cache_entry):
servers = list(cache_entry)
return servers
return _sort_server_list(servers)
try:
answers, _, _ = yield make_deferred_yieldable(
@ -169,4 +182,4 @@ class SrvResolver(object):
)
self._cache[service_name] = list(servers)
return servers
return _sort_server_list(servers)

View file

@ -32,22 +32,40 @@ from synapse.util.metrics import Measure
# period to cache .well-known results for by default
WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600
# jitter to add to the .well-known default cache ttl
WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60
# jitter factor to add to the .well-known default cache ttls
WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 0.1
# period to cache failure to fetch .well-known for
WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600
# period to cache failure to fetch .well-known if there has recently been a
# valid well-known for that domain.
WELL_KNOWN_DOWN_CACHE_PERIOD = 2 * 60
# period to remember there was a valid well-known after valid record expires
WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID = 2 * 3600
# cap for .well-known cache period
WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600
# lower bound for .well-known cache period
WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60
# Attempt to refetch a cached well-known N% of the TTL before it expires.
# e.g. if set to 0.2 and we have a cached entry with a TTL of 5mins, then
# we'll start trying to refetch 1 minute before it expires.
WELL_KNOWN_GRACE_PERIOD_FACTOR = 0.2
# Number of times we retry fetching a well-known for a domain we know recently
# had a valid entry.
WELL_KNOWN_RETRY_ATTEMPTS = 3
logger = logging.getLogger(__name__)
_well_known_cache = TTLCache("well-known")
_had_valid_well_known_cache = TTLCache("had-valid-well-known")
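To make the grace-period arithmetic concrete, a small worked sketch with hypothetical timestamps:

    WELL_KNOWN_GRACE_PERIOD_FACTOR = 0.2

    # A cached entry stored at (fictional) time 1000 with a 5-minute TTL:
    ttl = 300
    expiry = 1000 + ttl  # 1300

    # Serve the cached value until 20% of the TTL before expiry...
    refetch_from = expiry - WELL_KNOWN_GRACE_PERIOD_FACTOR * ttl
    assert refetch_from == 1240  # ...i.e. start refetching 60s early

    now = 1200
    use_cache = now < refetch_from  # True: still fresh, no refetch yet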
@attr.s(slots=True, frozen=True)
@ -59,14 +77,20 @@ class WellKnownResolver(object):
"""Handles well-known lookups for matrix servers.
"""
def __init__(self, reactor, agent, well_known_cache=None):
def __init__(
self, reactor, agent, well_known_cache=None, had_well_known_cache=None
):
self._reactor = reactor
self._clock = Clock(reactor)
if well_known_cache is None:
well_known_cache = _well_known_cache
if had_well_known_cache is None:
had_well_known_cache = _had_valid_well_known_cache
self._well_known_cache = well_known_cache
self._had_valid_well_known_cache = had_well_known_cache
self._well_known_agent = RedirectAgent(agent)
@defer.inlineCallbacks
@ -80,59 +104,86 @@ class WellKnownResolver(object):
Deferred[WellKnownLookupResult]: The result of the lookup
"""
try:
result = self._well_known_cache[server_name]
except KeyError:
# TODO: should we linearise so that we don't end up doing two .well-known
# requests for the same server in parallel?
with Measure(self._clock, "get_well_known"):
result, cache_period = yield self._do_get_well_known(server_name)
prev_result, expiry, ttl = self._well_known_cache.get_with_expiry(
server_name
)
if cache_period > 0:
self._well_known_cache.set(server_name, result, cache_period)
now = self._clock.time()
if now < expiry - WELL_KNOWN_GRACE_PERIOD_FACTOR * ttl:
return WellKnownLookupResult(delegated_server=prev_result)
except KeyError:
prev_result = None
# TODO: should we linearise so that we don't end up doing two .well-known
# requests for the same server in parallel?
try:
with Measure(self._clock, "get_well_known"):
result, cache_period = yield self._fetch_well_known(server_name)
except _FetchWellKnownFailure as e:
if prev_result and e.temporary:
# This is a temporary failure and we have a still valid cached
# result, so lets return that. Hopefully the next time we ask
# the remote will be back up again.
return WellKnownLookupResult(delegated_server=prev_result)
result = None
if self._had_valid_well_known_cache.get(server_name, False):
# We have recently seen a valid well-known record for this
# server, so we cache the lack of well-known for a shorter time.
cache_period = WELL_KNOWN_DOWN_CACHE_PERIOD
else:
cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
# add some randomness to the TTL to avoid a stampeding herd
cache_period *= random.uniform(
1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
)
if cache_period > 0:
self._well_known_cache.set(server_name, result, cache_period)
return WellKnownLookupResult(delegated_server=result)
@defer.inlineCallbacks
def _do_get_well_known(self, server_name):
def _fetch_well_known(self, server_name):
"""Actually fetch and parse a .well-known, without checking the cache
Args:
server_name (bytes): name of the server, from the requested url
Raises:
_FetchWellKnownFailure if we fail to lookup a result
Returns:
Deferred[Tuple[bytes|None|object],int]:
result, cache period, where result is one of:
- the new server name from the .well-known (as a `bytes`)
- None if there was no .well-known file.
- INVALID_WELL_KNOWN if the .well-known was invalid
Deferred[Tuple[bytes,int]]: The lookup result and cache period.
"""
uri = b"https://%s/.well-known/matrix/server" % (server_name,)
uri_str = uri.decode("ascii")
logger.info("Fetching %s", uri_str)
had_valid_well_known = self._had_valid_well_known_cache.get(server_name, False)
# We do this in two steps to differentiate between possibly transient
# errors (e.g. can't connect to host, 503 response) and more permanent
# errors (such as getting a 404 response).
response, body = yield self._make_well_known_request(
server_name, retry=had_valid_well_known
)
try:
response = yield make_deferred_yieldable(
self._well_known_agent.request(b"GET", uri)
)
body = yield make_deferred_yieldable(readBody(response))
if response.code != 200:
raise Exception("Non-200 response %s" % (response.code,))
parsed_body = json.loads(body.decode("utf-8"))
logger.info("Response from .well-known: %s", parsed_body)
if not isinstance(parsed_body, dict):
raise Exception("not a dict")
if "m.server" not in parsed_body:
raise Exception("Missing key 'm.server'")
result = parsed_body["m.server"].encode("ascii")
except defer.CancelledError:
# Bail if we've been cancelled
raise
except Exception as e:
logger.info("Error fetching %s: %s", uri_str, e)
# add some randomness to the TTL to avoid a stampeding herd every hour
# after startup
cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
return (None, cache_period)
result = parsed_body["m.server"].encode("ascii")
logger.info("Error parsing well-known for %s: %s", server_name, e)
raise _FetchWellKnownFailure(temporary=False)
cache_period = _cache_period_from_headers(
response.headers, time_now=self._reactor.seconds
@ -141,12 +192,68 @@ class WellKnownResolver(object):
cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
# add some randomness to the TTL to avoid a stampeding herd every 24 hours
# after startup
cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
cache_period *= random.uniform(
1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
)
else:
cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD)
return (result, cache_period)
# We got a success, mark as such in the cache
self._had_valid_well_known_cache.set(
server_name,
bool(result),
cache_period + WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID,
)
return result, cache_period
@defer.inlineCallbacks
def _make_well_known_request(self, server_name, retry):
"""Make the well known request.
If requested, this will retry the request when it fails (e.g. it is
unable to connect, or receives a 5xx error).
Args:
server_name (bytes)
retry (bool): Whether to retry the request if it fails.
Returns:
Deferred[tuple[IResponse, bytes]] Returns the response object and
body. Response may be a non-200 response.
"""
uri = b"https://%s/.well-known/matrix/server" % (server_name,)
uri_str = uri.decode("ascii")
i = 0
while True:
i += 1
logger.info("Fetching %s", uri_str)
try:
response = yield make_deferred_yieldable(
self._well_known_agent.request(b"GET", uri)
)
body = yield make_deferred_yieldable(readBody(response))
if 500 <= response.code < 600:
raise Exception("Non-200 response %s" % (response.code,))
return response, body
except defer.CancelledError:
# Bail if we've been cancelled
raise
except Exception as e:
if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS:
logger.info("Error fetching %s: %s", uri_str, e)
raise _FetchWellKnownFailure(temporary=True)
logger.info("Error fetching %s: %s. Retrying", uri_str, e)
# Sleep briefly in the hopes that they come back up
yield self._clock.sleep(0.5)
def _cache_period_from_headers(headers, time_now=time.time):
@ -185,3 +292,10 @@ def _parse_cache_control(headers):
v = splits[1] if len(splits) > 1 else None
cache_controls[k] = v
return cache_controls
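A simplified sketch of turning the parsed cache-control directives into a cache period; the no-store/max-age handling below follows standard Cache-Control semantics and is an assumption, not the exact implementation:

    def cache_period_from_cache_controls(cache_controls):
        # no-store: the response must not be cached at all.
        if "no-store" in cache_controls:
            return 0
        # max-age: lifetime in seconds.
        max_age = cache_controls.get("max-age")
        if max_age is not None:
            try:
                return int(max_age)
            except ValueError:
                pass
        # No usable directive: caller falls back to Expires or the default.
        return None

    assert cache_period_from_cache_controls({"max-age": "3600"}) == 3600
    assert cache_period_from_cache_controls({"no-store": None}) == 0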
@attr.s()
class _FetchWellKnownFailure(Exception):
# True if we did not get a definitive (non-5xx) HTTP response, i.e. the
# failure may or may not be temporary (connection error or 5xx response).
temporary = attr.ib()

View file

@ -36,7 +36,6 @@ from twisted.internet.task import _EPSILON, Cooperator
from twisted.web._newclient import ResponseDone
from twisted.web.http_headers import Headers
import synapse.logging.opentracing as opentracing
import synapse.metrics
import synapse.util.retryutils
from synapse.api.errors import (
@ -50,6 +49,12 @@ from synapse.http import QuieterFileBodyProducer
from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import (
inject_active_span_byte_dict,
set_tag,
start_active_span,
tags,
)
from synapse.util.async_helpers import timeout_deferred
from synapse.util.metrics import Measure
@ -340,21 +345,20 @@ class MatrixFederationHttpClient(object):
else:
query_bytes = b""
# Retrieve the current span
scope = opentracing.start_active_span(
scope = start_active_span(
"outgoing-federation-request",
tags={
opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_CLIENT,
opentracing.tags.PEER_ADDRESS: request.destination,
opentracing.tags.HTTP_METHOD: request.method,
opentracing.tags.HTTP_URL: request.path,
tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
tags.PEER_ADDRESS: request.destination,
tags.HTTP_METHOD: request.method,
tags.HTTP_URL: request.path,
},
finish_on_close=True,
)
# Inject the span into the headers
headers_dict = {}
opentracing.inject_active_span_byte_dict(headers_dict, request.destination)
inject_active_span_byte_dict(headers_dict, request.destination)
headers_dict[b"User-Agent"] = [self.version_string_bytes]
@ -436,9 +440,7 @@ class MatrixFederationHttpClient(object):
response.phrase.decode("ascii", errors="replace"),
)
opentracing.set_tag(
opentracing.tags.HTTP_STATUS_CODE, response.code
)
set_tag(tags.HTTP_STATUS_CODE, response.code)
if 200 <= response.code < 300:
pass

View file

@ -40,6 +40,7 @@ from synapse.api.errors import (
UnrecognizedRequestError,
)
from synapse.logging.context import preserve_fn
from synapse.logging.opentracing import trace_servlet
from synapse.util.caches import intern_dict
logger = logging.getLogger(__name__)
@ -257,7 +258,9 @@ class JsonResource(HttpServer, resource.Resource):
self.path_regexs = {}
self.hs = hs
def register_paths(self, method, path_patterns, callback, servlet_classname):
def register_paths(
self, method, path_patterns, callback, servlet_classname, trace=True
):
"""
Registers a request handler against a regular expression. Later request URLs are
checked against these regular expressions in order to identify an appropriate
@ -273,8 +276,16 @@ class JsonResource(HttpServer, resource.Resource):
servlet_classname (str): The name of the handler to be used in prometheus
and opentracing logs.
trace (bool): Whether we should start a span to trace the servlet.
"""
method = method.encode("utf-8") # method is bytes on py3
if trace:
# We don't extract the context from the servlet because we can't
# trust the sender
callback = trace_servlet(servlet_classname)(callback)
for path_pattern in path_patterns:
logger.debug("Registering for %s %s", method, path_pattern.pattern)
self.path_regexs.setdefault(method, []).append(

View file

@ -20,7 +20,6 @@ import logging
from canonicaljson import json
from synapse.api.errors import Codes, SynapseError
from synapse.logging.opentracing import trace_servlet
logger = logging.getLogger(__name__)
@ -298,10 +297,7 @@ class RestServlet(object):
servlet_classname = self.__class__.__name__
method_handler = getattr(self, "on_%s" % (method,))
http_server.register_paths(
method,
patterns,
trace_servlet(servlet_classname, method_handler),
servlet_classname,
method, patterns, method_handler, servlet_classname
)
else:

View file

@ -0,0 +1,374 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os.path
import sys
import typing
import warnings
from typing import List
import attr
from constantly import NamedConstant, Names, ValueConstant, Values
from zope.interface import implementer
from twisted.logger import (
FileLogObserver,
FilteringLogObserver,
ILogObserver,
LogBeginner,
Logger,
LogLevel,
LogLevelFilterPredicate,
LogPublisher,
eventAsText,
jsonFileLogObserver,
)
from synapse.config._base import ConfigError
from synapse.logging._terse_json import (
TerseJSONToConsoleLogObserver,
TerseJSONToTCPLogObserver,
)
from synapse.logging.context import LoggingContext
def stdlib_log_level_to_twisted(level: str) -> LogLevel:
"""
Convert a stdlib log level to Twisted's log level.
"""
lvl = level.lower().replace("warning", "warn")
return LogLevel.levelWithName(lvl)
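Usage sketch for the level mapping above:

    from twisted.logger import LogLevel

    # stdlib "WARNING" maps to Twisted's "warn"; other names match directly.
    assert stdlib_log_level_to_twisted("WARNING") == LogLevel.warn
    assert stdlib_log_level_to_twisted("debug") == LogLevel.debug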
@attr.s
@implementer(ILogObserver)
class LogContextObserver(object):
"""
An ILogObserver which adds Synapse-specific log context information.
Attributes:
observer (ILogObserver): The target parent observer.
"""
observer = attr.ib()
def __call__(self, event: dict) -> None:
"""
Consume a log event and emit it to the parent observer after filtering
and adding log context information.
Args:
event (dict)
"""
# Filter out some useless events that Twisted outputs
if "log_text" in event:
if event["log_text"].startswith("DNSDatagramProtocol starting on "):
return
if event["log_text"].startswith("(UDP Port "):
return
if event["log_text"].startswith("Timing out client") or event[
"log_format"
].startswith("Timing out client"):
return
context = LoggingContext.current_context()
# Copy the context information to the log event.
if context is not None:
context.copy_to_twisted_log_entry(event)
else:
# If there's no logging context, not even the root one, we might be
# starting up or it might be from non-Synapse code. Log it as if it
# came from the root logger.
event["request"] = None
event["scope"] = None
self.observer(event)
class PythonStdlibToTwistedLogger(logging.Handler):
"""
Transform a Python stdlib log message into a Twisted one.
"""
def __init__(self, observer, *args, **kwargs):
"""
Args:
observer (ILogObserver): A Twisted logging observer.
*args, **kwargs: Args/kwargs to be passed to logging.Handler.
"""
self.observer = observer
super().__init__(*args, **kwargs)
def emit(self, record: logging.LogRecord) -> None:
"""
Emit a record to Twisted's observer.
Args:
record (logging.LogRecord)
"""
self.observer(
{
"log_time": record.created,
"log_text": record.getMessage(),
"log_format": "{log_text}",
"log_namespace": record.name,
"log_level": stdlib_log_level_to_twisted(record.levelname),
}
)
def SynapseFileLogObserver(outFile: typing.IO[str]) -> FileLogObserver:
"""
A log observer that formats events like the traditional log formatter and
sends them to `outFile`.
Args:
outFile (file object): The file object to write to.
"""
def formatEvent(_event: dict) -> str:
event = dict(_event)
event["log_level"] = event["log_level"].name.upper()
event["log_format"] = "- {log_namespace} - {log_level} - {request} - " + (
event.get("log_format", "{log_text}") or "{log_text}"
)
return eventAsText(event, includeSystem=False) + "\n"
return FileLogObserver(outFile, formatEvent)
class DrainType(Names):
CONSOLE = NamedConstant()
CONSOLE_JSON = NamedConstant()
CONSOLE_JSON_TERSE = NamedConstant()
FILE = NamedConstant()
FILE_JSON = NamedConstant()
NETWORK_JSON_TERSE = NamedConstant()
class OutputPipeType(Values):
stdout = ValueConstant(sys.__stdout__)
stderr = ValueConstant(sys.__stderr__)
@attr.s
class DrainConfiguration(object):
name = attr.ib()
type = attr.ib()
location = attr.ib()
options = attr.ib(default=None)
@attr.s
class NetworkJSONTerseOptions(object):
maximum_buffer = attr.ib(type=int)
DEFAULT_LOGGERS = {"synapse": {"level": "INFO"}}
def parse_drain_configs(
drains: dict
) -> typing.Generator[DrainConfiguration, None, None]:
"""
Parse the drain configurations.
Args:
drains (dict): A dict of drain configurations, keyed by name.
Yields:
DrainConfiguration instances.
Raises:
ConfigError: If any of the drain configuration items are invalid.
"""
for name, config in drains.items():
if "type" not in config:
raise ConfigError("Logging drains require a 'type' key.")
try:
logging_type = DrainType.lookupByName(config["type"].upper())
except ValueError:
raise ConfigError(
"%s is not a known logging drain type." % (config["type"],)
)
if logging_type in [
DrainType.CONSOLE,
DrainType.CONSOLE_JSON,
DrainType.CONSOLE_JSON_TERSE,
]:
location = config.get("location")
if location is None or location not in ["stdout", "stderr"]:
raise ConfigError(
(
"The %s drain needs the 'location' key set to "
"either 'stdout' or 'stderr'."
)
% (logging_type,)
)
pipe = OutputPipeType.lookupByName(location).value
yield DrainConfiguration(name=name, type=logging_type, location=pipe)
elif logging_type in [DrainType.FILE, DrainType.FILE_JSON]:
if "location" not in config:
raise ConfigError(
"The %s drain needs the 'location' key set." % (logging_type,)
)
location = config.get("location")
if os.path.abspath(location) != location:
raise ConfigError(
"File paths need to be absolute, '%s' is a relative path"
% (location,)
)
yield DrainConfiguration(name=name, type=logging_type, location=location)
elif logging_type in [DrainType.NETWORK_JSON_TERSE]:
host = config.get("host")
port = config.get("port")
maximum_buffer = config.get("maximum_buffer", 1000)
yield DrainConfiguration(
name=name,
type=logging_type,
location=(host, port),
options=NetworkJSONTerseOptions(maximum_buffer=maximum_buffer),
)
else:
raise ConfigError(
"The %s drain type is currently not implemented."
% (config["type"].upper(),)
)
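A hypothetical drains configuration exercising each branch above (all names, paths, and addresses are illustrative):

    drains = {
        "console": {"type": "console", "location": "stdout"},
        "file": {"type": "file_json", "location": "/var/log/synapse.json"},
        "remote": {
            "type": "network_json_terse",
            "host": "10.1.2.3",
            "port": 9000,
            "maximum_buffer": 1000,
        },
    }

    for drain in parse_drain_configs(drains):
        print(drain.name, drain.type, drain.location)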
def setup_structured_logging(
hs,
config,
log_config: dict,
logBeginner: LogBeginner,
redirect_stdlib_logging: bool = True,
) -> LogPublisher:
"""
Set up Twisted's structured logging system.
Args:
hs: The homeserver to use.
config (HomeserverConfig): The configuration of the Synapse homeserver.
log_config (dict): The log configuration to use.
"""
if config.no_redirect_stdio:
raise ConfigError(
"no_redirect_stdio cannot be defined using structured logging."
)
logger = Logger()
if "drains" not in log_config:
raise ConfigError("The logging configuration requires a list of drains.")
observers = [] # type: List[ILogObserver]
for observer in parse_drain_configs(log_config["drains"]):
# Pipe drains
if observer.type == DrainType.CONSOLE:
logger.debug(
"Starting up the {name} console logger drain", name=observer.name
)
observers.append(SynapseFileLogObserver(observer.location))
elif observer.type == DrainType.CONSOLE_JSON:
logger.debug(
"Starting up the {name} JSON console logger drain", name=observer.name
)
observers.append(jsonFileLogObserver(observer.location))
elif observer.type == DrainType.CONSOLE_JSON_TERSE:
logger.debug(
"Starting up the {name} terse JSON console logger drain",
name=observer.name,
)
observers.append(
TerseJSONToConsoleLogObserver(observer.location, metadata={})
)
# File drains
elif observer.type == DrainType.FILE:
logger.debug("Starting up the {name} file logger drain", name=observer.name)
log_file = open(observer.location, "at", buffering=1, encoding="utf8")
observers.append(SynapseFileLogObserver(log_file))
elif observer.type == DrainType.FILE_JSON:
logger.debug(
"Starting up the {name} JSON file logger drain", name=observer.name
)
log_file = open(observer.location, "at", buffering=1, encoding="utf8")
observers.append(jsonFileLogObserver(log_file))
elif observer.type == DrainType.NETWORK_JSON_TERSE:
metadata = {"server_name": hs.config.server_name}
log_observer = TerseJSONToTCPLogObserver(
hs=hs,
host=observer.location[0],
port=observer.location[1],
metadata=metadata,
maximum_buffer=observer.options.maximum_buffer,
)
log_observer.start()
observers.append(log_observer)
else:
# We should never get here, but, just in case, throw an error.
raise ConfigError("%s drain type cannot be configured" % (observer.type,))
publisher = LogPublisher(*observers)
log_filter = LogLevelFilterPredicate()
for namespace, namespace_config in log_config.get(
"loggers", DEFAULT_LOGGERS
).items():
# Set the log level for twisted.logger.Logger namespaces
log_filter.setLogLevelForNamespace(
namespace,
stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")),
)
# Also set the log levels for the stdlib logger namespaces, to prevent
# them getting to PythonStdlibToTwistedLogger and having to be formatted
if "level" in namespace_config:
logging.getLogger(namespace).setLevel(namespace_config.get("level"))
f = FilteringLogObserver(publisher, [log_filter])
lco = LogContextObserver(f)
if redirect_stdlib_logging:
stuff_into_twisted = PythonStdlibToTwistedLogger(lco)
stdliblogger = logging.getLogger()
stdliblogger.addHandler(stuff_into_twisted)
# Always redirect standard I/O, otherwise other logging outputs might miss
# it.
logBeginner.beginLoggingTo([lco], redirectStandardIO=True)
return publisher
def reload_structured_logging(*args, log_config=None) -> None:
warnings.warn(
"Currently the structured logging system can not be reloaded, doing nothing"
)

View file

@ -0,0 +1,280 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Log formatters that output terse JSON.
"""
import sys
from collections import deque
from ipaddress import IPv4Address, IPv6Address, ip_address
from math import floor
from typing import IO
import attr
from simplejson import dumps
from zope.interface import implementer
from twisted.application.internet import ClientService
from twisted.internet.endpoints import (
HostnameEndpoint,
TCP4ClientEndpoint,
TCP6ClientEndpoint,
)
from twisted.internet.protocol import Factory, Protocol
from twisted.logger import FileLogObserver, ILogObserver, Logger
from twisted.python.failure import Failure
def flatten_event(event: dict, metadata: dict, include_time: bool = False):
"""
Flatten a Twisted logging event to a dictionary capable of being sent
as a log event to a logging aggregation system.
The format is vastly simplified and is not designed to be a "human readable
string" in the sense that traditional logs are. Instead, the structure is
optimised for searchability and filtering, with human-understandable log
keys.
Args:
event (dict): The Twisted logging event we are flattening.
metadata (dict): Additional data to include with each log message. This
can be information like the server name. Since the target log
consumer does not know who we are other than by host IP, this
allows us to forward through static information.
include_time (bool): Should we include the `time` key? If False, the
event time is stripped from the event.
"""
new_event = {}
# If it's a failure, make the new event's log_failure be the traceback text.
if "log_failure" in event:
new_event["log_failure"] = event["log_failure"].getTraceback()
# If it's a warning, copy over a string representation of the warning.
if "warning" in event:
new_event["warning"] = str(event["warning"])
# Stdlib logging events have "log_text" as their human-readable portion,
# Twisted ones have "log_format". For now, include the log_format, so that
# context only given in the log format (e.g. what is being logged) is
# available.
if "log_text" in event:
new_event["log"] = event["log_text"]
else:
new_event["log"] = event["log_format"]
# We want to include the timestamp when forwarding over the network, but
# exclude it when we are writing to stdout. This is because the log ingester
# (e.g. logstash, fluentd) can add its own timestamp.
if include_time:
new_event["time"] = round(event["log_time"], 2)
# Convert the log level to a textual representation.
new_event["level"] = event["log_level"].name.upper()
# Ignore these keys, and do not transfer them over to the new log object.
# They are either useless (isError), transferred manually above (log_time,
# log_level, etc), or contain Python objects which are not useful for output
# (log_logger, log_source).
keys_to_delete = [
"isError",
"log_failure",
"log_format",
"log_level",
"log_logger",
"log_source",
"log_system",
"log_time",
"log_text",
"observer",
"warning",
]
# If it's from the Twisted legacy logger (twisted.python.log), it adds some
# more keys we want to purge.
if event.get("log_namespace") == "log_legacy":
keys_to_delete.extend(["message", "system", "time"])
# Rather than modify the dictionary in place, construct a new one with only
# the content we want. The original event should be considered 'frozen'.
for key in event.keys():
if key in keys_to_delete:
continue
if isinstance(event[key], (str, int, bool, float)) or event[key] is None:
# If it's a plain type, include it as is.
new_event[key] = event[key]
else:
# If it's not one of those basic types, write out a string
# representation. This should probably be a warning in development,
# so that we are sure we are only outputting useful data.
new_event[key] = str(event[key])
# Add the metadata information to the event (e.g. the server_name).
new_event.update(metadata)
return new_event
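A sketch of the flattening in action, with a hand-built Twisted event (values are illustrative):

    from twisted.logger import LogLevel

    event = {
        "log_time": 1568000000.123,
        "log_level": LogLevel.info,
        "log_namespace": "synapse.http.client",
        "log_format": "Fetched {uri}",
        "uri": "https://example.com",
    }

    flattened = flatten_event(event, {"server_name": "example.com"}, include_time=True)
    # => {"log": "Fetched {uri}", "level": "INFO", "time": 1568000000.12,
    #     "log_namespace": "synapse.http.client",
    #     "uri": "https://example.com", "server_name": "example.com"}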
def TerseJSONToConsoleLogObserver(outFile: IO[str], metadata: dict) -> FileLogObserver:
"""
A log observer that formats events to a flattened JSON representation.
Args:
outFile: The file object to write to.
metadata: Metadata to be added to each log object.
"""
def formatEvent(_event: dict) -> str:
flattened = flatten_event(_event, metadata)
return dumps(flattened, ensure_ascii=False, separators=(",", ":")) + "\n"
return FileLogObserver(outFile, formatEvent)
@attr.s
@implementer(ILogObserver)
class TerseJSONToTCPLogObserver(object):
"""
An IObserver that writes JSON logs to a TCP target.
Args:
hs (HomeServer): The Homeserver that is being logged for.
host: The host of the logging target.
port: The logging target's port.
metadata: Metadata to be added to each log entry.
"""
hs = attr.ib()
host = attr.ib(type=str)
port = attr.ib(type=int)
metadata = attr.ib(type=dict)
maximum_buffer = attr.ib(type=int)
_buffer = attr.ib(default=attr.Factory(deque), type=deque)
_writer = attr.ib(default=None)
_logger = attr.ib(default=attr.Factory(Logger))
def start(self) -> None:
# Connect without DNS lookups if it's a direct IP.
try:
ip = ip_address(self.host)
if isinstance(ip, IPv4Address):
endpoint = TCP4ClientEndpoint(
self.hs.get_reactor(), self.host, self.port
)
elif isinstance(ip, IPv6Address):
endpoint = TCP6ClientEndpoint(
self.hs.get_reactor(), self.host, self.port
)
except ValueError:
endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host, self.port)
factory = Factory.forProtocol(Protocol)
self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor())
self._service.startService()
def _write_loop(self) -> None:
"""
Implement the write loop.
"""
if self._writer:
return
self._writer = self._service.whenConnected()
@self._writer.addBoth
def writer(r):
if isinstance(r, Failure):
r.printTraceback(file=sys.__stderr__)
self._writer = None
self.hs.get_reactor().callLater(1, self._write_loop)
return
try:
for event in self._buffer:
r.transport.write(
dumps(event, ensure_ascii=False, separators=(",", ":")).encode(
"utf8"
)
)
r.transport.write(b"\n")
self._buffer.clear()
except Exception as e:
sys.__stderr__.write("Failed writing out logs with %s\n" % (str(e),))
self._writer = False
self.hs.get_reactor().callLater(1, self._write_loop)
def _handle_pressure(self) -> None:
"""
Handle backpressure by shedding events.
Events will be shed from the buffer, in this order, until it is below the maximum:
- Shed DEBUG events
- Shed INFO events
- Shed the middle 50% of the events.
"""
if len(self._buffer) <= self.maximum_buffer:
return
# Strip out DEBUGs
self._buffer = deque(
filter(lambda event: event["level"] != "DEBUG", self._buffer)
)
if len(self._buffer) <= self.maximum_buffer:
return
# Strip out INFOs
self._buffer = deque(
filter(lambda event: event["level"] != "INFO", self._buffer)
)
if len(self._buffer) <= self.maximum_buffer:
return
# Cut the middle entries out
buffer_split = floor(self.maximum_buffer / 2)
old_buffer = self._buffer
self._buffer = deque()
for i in range(buffer_split):
self._buffer.append(old_buffer.popleft())
end_buffer = []
for i in range(buffer_split):
end_buffer.append(old_buffer.pop())
self._buffer.extend(reversed(end_buffer))
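A simplified sketch of the shedding order on a toy buffer (the real method pops from both ends of the deque; here we slice for brevity):

    from collections import deque
    from math import floor

    maximum_buffer = 4
    levels = ["DEBUG", "INFO", "WARN", "ERROR", "WARN", "ERROR", "WARN", "ERROR"]
    buffer = deque({"level": lvl, "n": i} for i, lvl in enumerate(levels))

    # Stage 1: drop DEBUG events (8 -> 7, still over the maximum).
    buffer = deque(e for e in buffer if e["level"] != "DEBUG")
    # Stage 2: drop INFO events (7 -> 6, still over the maximum).
    buffer = deque(e for e in buffer if e["level"] != "INFO")
    # Stage 3: keep only the oldest and newest maximum_buffer/2 events.
    half = floor(maximum_buffer / 2)
    buffer = deque(list(buffer)[:half] + list(buffer)[-half:])
    assert len(buffer) == 4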
def __call__(self, event: dict) -> None:
flattened = flatten_event(event, self.metadata, include_time=True)
self._buffer.append(flattened)
# Handle backpressure, if it exists.
try:
self._handle_pressure()
except Exception:
# If handling backpressure fails, clear the buffer and log the
# exception.
self._buffer.clear()
self._logger.failure("Failed clearing backpressure")
# Try and write immediately.
self._write_loop()

View file

@ -25,6 +25,7 @@ See doc/log_contexts.rst for details on how this works.
import logging
import threading
import types
from typing import Any, List
from twisted.internet import defer, threads
@ -194,7 +195,7 @@ class LoggingContext(object):
class Sentinel(object):
"""Sentinel to represent the root context"""
__slots__ = []
__slots__ = [] # type: List[Any]
def __str__(self):
return "sentinel"
@ -202,6 +203,10 @@ class LoggingContext(object):
def copy_to(self, record):
pass
def copy_to_twisted_log_entry(self, record):
record["request"] = None
record["scope"] = None
def start(self):
pass
@ -330,6 +335,13 @@ class LoggingContext(object):
# we also track the current scope:
record.scope = self.scope
def copy_to_twisted_log_entry(self, record):
"""
Copy logging fields from this context to a Twisted log record.
"""
record["request"] = self.request
record["scope"] = self.scope
def start(self):
if get_thread_id() != self.main_thread:
logger.warning("Started logcontext %s on different thread", self)

View file

@ -43,6 +43,9 @@ OpenTracing to be easily disabled in Synapse and thereby have OpenTracing as
an optional dependency. This does however limit the number of modifiable spans
at any point in the code to one. From here out references to `opentracing`
in the code snippets refer to Synapse's module.
Most methods provided in the module have a direct correlation to those provided
by opentracing. Refer to the docs there for more in-depth documentation on some
of the args and methods.
Tracing
-------
@ -68,52 +71,62 @@ set a tag on the current active span.
Tracing functions
-----------------
Functions can be easily traced using decorators. There is a decorator for
'normal' functions and for functions which are actually deferreds. The name of
Functions can be easily traced using decorators. The name of
the function becomes the operation name for the span.
.. code-block:: python
from synapse.logging.opentracing import trace, trace_deferred
from synapse.logging.opentracing import trace
# Start a span using 'normal_function' as the operation name
# Start a span using 'interesting_function' as the operation name
@trace
def normal_function(*args, **kwargs):
def interesting_function(*args, **kwargs):
# Does all kinds of cool and expected things
return something_usual_and_useful
# Start a span using 'deferred_function' as the operation name
@trace_deferred
@defer.inlineCallbacks
def deferred_function(*args, **kwargs):
# We start
yield we_wait
# we finish
return something_usual_and_useful
Operation names can be explicitly set for functions by using
``trace_using_operation_name`` and
``trace_deferred_using_operation_name``
Operation names can be explicitly set for a function by passing the
operation name to ``trace``
.. code-block:: python
from synapse.logging.opentracing import (
trace_using_operation_name,
trace_deferred_using_operation_name
)
from synapse.logging.opentracing import trace
@trace_using_operation_name("A *much* better operation name")
def normal_function(*args, **kwargs):
@trace(opname="a_better_operation_name")
def interesting_badly_named_function(*args, **kwargs):
# Does all kinds of cool and expected things
return something_usual_and_useful
@trace_deferred_using_operation_name("Another exciting operation name!")
@defer.inlineCallbacks
def deferred_function(*args, **kwargs):
# We start
yield we_wait
# we finish
return something_usual_and_useful
Setting Tags
------------
To set a tag on the active span do
.. code-block:: python
from synapse.logging.opentracing import set_tag
set_tag(tag_name, tag_value)
There's a convenient decorator to tag all the args of the method. It uses
inspection in order to use the formal parameter names prefixed with 'ARG_' as
tag names. It uses kwarg names as tag names without the prefix.
.. code-block:: python
from synapse.logging.opentracing import tag_args
@tag_args
def set_fates(clotho, lachesis, atropos, father="Zues", mother="Themis"):
pass
set_fates("the story", "the end", "the act")
# This will have the following tags
# - ARG_clotho: "the story"
# - ARG_lachesis: "the end"
# - ARG_atropos: "the act"
# - father: "Zues"
# - mother: "Themis"
Contexts and carriers
---------------------
@ -136,6 +149,9 @@ unchartered waters will require the enforcement of the whitelist.
``logging/opentracing.py`` has a ``whitelisted_homeserver`` method which takes
in a destination and compares it to the whitelist.
Most injection methods take a 'destination' arg. The context will only be injected
if the destination matches the whitelist or the destination is None.
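For example (the destination is illustrative):

.. code-block:: python

    from synapse.logging.opentracing import inject_active_span_byte_dict

    headers = {}

    # Injected only if "remote.example.com" matches the homeserver whitelist
    inject_active_span_byte_dict(headers, "remote.example.com")

    # Skip the whitelist check and always inject
    inject_active_span_byte_dict(headers, None, check_destination=False)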
=======
Gotchas
=======
@ -161,16 +177,54 @@ from twisted.internet import defer
from synapse.config import ConfigError
# Helper class
class _DummyTagNames(object):
"""wrapper of opentracings tags. We need to have them if we
want to reference them without opentracing around. Clearly they
should never actually show up in a trace. `set_tags` overwrites
these with the correct ones."""
INVALID_TAG = "invalid-tag"
COMPONENT = INVALID_TAG
DATABASE_INSTANCE = INVALID_TAG
DATABASE_STATEMENT = INVALID_TAG
DATABASE_TYPE = INVALID_TAG
DATABASE_USER = INVALID_TAG
ERROR = INVALID_TAG
HTTP_METHOD = INVALID_TAG
HTTP_STATUS_CODE = INVALID_TAG
HTTP_URL = INVALID_TAG
MESSAGE_BUS_DESTINATION = INVALID_TAG
PEER_ADDRESS = INVALID_TAG
PEER_HOSTNAME = INVALID_TAG
PEER_HOST_IPV4 = INVALID_TAG
PEER_HOST_IPV6 = INVALID_TAG
PEER_PORT = INVALID_TAG
PEER_SERVICE = INVALID_TAG
SAMPLING_PRIORITY = INVALID_TAG
SERVICE = INVALID_TAG
SPAN_KIND = INVALID_TAG
SPAN_KIND_CONSUMER = INVALID_TAG
SPAN_KIND_PRODUCER = INVALID_TAG
SPAN_KIND_RPC_CLIENT = INVALID_TAG
SPAN_KIND_RPC_SERVER = INVALID_TAG
try:
import opentracing
tags = opentracing.tags
except ImportError:
opentracing = None
tags = _DummyTagNames
try:
from jaeger_client import Config as JaegerConfig
from synapse.logging.scopecontextmanager import LogContextScopeManager
except ImportError:
JaegerConfig = None
LogContextScopeManager = None
JaegerConfig = None # type: ignore
LogContextScopeManager = None # type: ignore
logger = logging.getLogger(__name__)
@ -185,8 +239,7 @@ _homeserver_whitelist = None
def only_if_tracing(func):
"""Executes the function only if we're tracing. Otherwise return.
Assumes the function wrapped may return None"""
"""Executes the function only if we're tracing. Otherwise returns None."""
@wraps(func)
def _only_if_tracing_inner(*args, **kwargs):
@ -198,6 +251,41 @@ def only_if_tracing(func):
return _only_if_tracing_inner
def ensure_active_span(message, ret=None):
"""Executes the operation only if opentracing is enabled and there is an active span.
If there is no active span it logs the message at the error level.
Args:
message (str): Message which fills in "There was no active span when trying to %s"
in the error log if there is no active span and opentracing is enabled.
ret (object): return value if opentracing is None or there is no active span.
Returns (object): The result of the func or ret if opentracing is disabled or there
was no active span.
"""
def ensure_active_span_inner_1(func):
@wraps(func)
def ensure_active_span_inner_2(*args, **kwargs):
if not opentracing:
return ret
if not opentracing.tracer.active_span:
logger.error(
"There was no active span when trying to %s."
" Did you forget to start one or did a context slip?",
message,
)
return ret
return func(*args, **kwargs)
return ensure_active_span_inner_2
return ensure_active_span_inner_1
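A hypothetical usage sketch (the function below is illustrative; the real setters appear later in this module):

    @ensure_active_span("set an example tag", ret=None)
    def set_example_tag(value):
        # Runs only when opentracing is enabled and a span is active;
        # otherwise an error is logged and `ret` (None) is returned.
        opentracing.tracer.active_span.set_tag("example", value)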
@contextlib.contextmanager
def _noop_context_manager(*args, **kwargs):
"""Does exactly what it says on the tin"""
@ -239,10 +327,6 @@ def init_tracer(config):
scope_manager=LogContextScopeManager(config),
).initialize_tracer()
# Set up tags to be opentracing's tags
global tags
tags = opentracing.tags
# Whitelisting
@ -269,7 +353,7 @@ def whitelisted_homeserver(destination):
Args:
destination (str)
"""
_homeserver_whitelist
if _homeserver_whitelist:
return _homeserver_whitelist.match(destination)
return False
@ -299,30 +383,28 @@ def start_active_span(
if opentracing is None:
return _noop_context_manager()
else:
# We need to enter the scope here for the logcontext to become active
return opentracing.tracer.start_active_span(
operation_name,
child_of=child_of,
references=references,
tags=tags,
start_time=start_time,
ignore_active_span=ignore_active_span,
finish_on_close=finish_on_close,
)
return opentracing.tracer.start_active_span(
operation_name,
child_of=child_of,
references=references,
tags=tags,
start_time=start_time,
ignore_active_span=ignore_active_span,
finish_on_close=finish_on_close,
)
def start_active_span_follows_from(operation_name, contexts):
if opentracing is None:
return _noop_context_manager()
else:
references = [opentracing.follows_from(context) for context in contexts]
scope = start_active_span(operation_name, references=references)
return scope
references = [opentracing.follows_from(context) for context in contexts]
scope = start_active_span(operation_name, references=references)
return scope
def start_active_span_from_context(
headers,
def start_active_span_from_request(
request,
operation_name,
references=None,
tags=None,
@ -331,9 +413,9 @@ def start_active_span_from_context(
finish_on_close=True,
):
"""
Extracts a span context from Twisted Headers.
Extracts a span context from a Twisted Request.
args:
headers (twisted.web.http_headers.Headers)
headers (twisted.web.http.Request)
For the other args see opentracing.tracer
@ -347,7 +429,9 @@ def start_active_span_from_context(
if opentracing is None:
return _noop_context_manager()
header_dict = {k.decode(): v[0].decode() for k, v in headers.getAllRawHeaders()}
header_dict = {
k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders()
}
context = opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, header_dict)
return opentracing.tracer.start_active_span(
@ -413,19 +497,19 @@ def start_active_span_from_edu(
# Opentracing setters for tags, logs, etc
@only_if_tracing
@ensure_active_span("set a tag")
def set_tag(key, value):
"""Sets a tag on the active span"""
opentracing.tracer.active_span.set_tag(key, value)
@only_if_tracing
@ensure_active_span("log")
def log_kv(key_values, timestamp=None):
"""Log to the active span"""
opentracing.tracer.active_span.log_kv(key_values, timestamp)
@only_if_tracing
@ensure_active_span("set the traces operation name")
def set_operation_name(operation_name):
"""Sets the operation name of the active span"""
opentracing.tracer.active_span.set_operation_name(operation_name)
@ -434,13 +518,18 @@ def set_operation_name(operation_name):
# Injection and extraction
@only_if_tracing
def inject_active_span_twisted_headers(headers, destination):
@ensure_active_span("inject the span into a header")
def inject_active_span_twisted_headers(headers, destination, check_destination=True):
"""
Injects a span context into twisted headers in-place
Args:
headers (twisted.web.http_headers.Headers)
destination (str): address of entity receiving the span context. If check_destination
is true the context will only be injected if the destination matches the
opentracing whitelist
check_destination (bool): If false, destination will be ignored and the context
will always be injected.
span (opentracing.Span)
Returns:
@ -454,7 +543,7 @@ def inject_active_span_twisted_headers(headers, destination):
https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
"""
if not whitelisted_homeserver(destination):
if check_destination and not whitelisted_homeserver(destination):
return
span = opentracing.tracer.active_span
@ -465,14 +554,19 @@ def inject_active_span_twisted_headers(headers, destination):
headers.addRawHeaders(key, value)
@only_if_tracing
def inject_active_span_byte_dict(headers, destination):
@ensure_active_span("inject the span into a byte dict")
def inject_active_span_byte_dict(headers, destination, check_destination=True):
"""
Injects a span context into a dict where the headers are encoded as byte
strings
Args:
headers (dict)
destination (str): address of entity receiving the span context. If check_destination
is true the context will only be injected if the destination matches the
opentracing whitelist
check_destination (bool): If false, destination will be ignored and the context
will always be injected.
Returns:
@@ -485,7 +579,7 @@ def inject_active_span_byte_dict(headers, destination):
here:
https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
"""
if not whitelisted_homeserver(destination):
if check_destination and not whitelisted_homeserver(destination):
return
span = opentracing.tracer.active_span
@@ -497,16 +591,18 @@ def inject_active_span_byte_dict(headers, destination):
headers[key.encode()] = [value.encode()]
@only_if_tracing
def inject_active_span_text_map(carrier, destination=None):
@ensure_active_span("inject the span into a text map")
def inject_active_span_text_map(carrier, destination, check_destination=True):
"""
Injects a span context into a dict
Args:
carrier (dict)
destination (str): the name of the remote server. The span context
will only be injected if the destination matches the homeserver_whitelist
or destination is None.
destination (str): address of entity receiving the span context. If check_destination
is true the context will only be injected if the destination matches the
opentracing whitelist
check_destination (bool): If false, destination will be ignored and the context
will always be injected.
Returns:
In-place modification of carrier
@@ -519,7 +615,7 @@ def inject_active_span_text_map(carrier, destination=None):
https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
"""
if destination and not whitelisted_homeserver(destination):
if check_destination and not whitelisted_homeserver(destination):
return
opentracing.tracer.inject(
@@ -527,6 +623,31 @@ def inject_active_span_text_map(carrier, destination=None):
)
@ensure_active_span("get the active span context as a dict", ret={})
def get_active_span_text_map(destination=None):
"""
Gets a span context as a dict. This can be used instead of manually
injecting a span into an empty carrier.
Args:
destination (str): the name of the remote server.
Returns:
dict: the active span's context if opentracing is enabled, otherwise empty.
"""
if destination and not whitelisted_homeserver(destination):
return {}
carrier = {}
opentracing.tracer.inject(
opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
)
return carrier
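A sketch of how the text-map helpers pair up across processes; the payload key follows the `org.matrix.opentracing_context` convention used elsewhere in Synapse, and the destination is illustrative.
import json
from synapse.logging.opentracing import (
    extract_text_map,
    get_active_span_text_map,
    start_active_span_follows_from,
)
# Sending side: serialise the active span's context into the payload.
carrier = get_active_span_text_map(destination="remote.example.com")
edu_content = {"typing": True, "org.matrix.opentracing_context": json.dumps(carrier)}
# Receiving side: rebuild the context and start a span that follows from it.
context = extract_text_map(json.loads(edu_content["org.matrix.opentracing_context"]))
with start_active_span_follows_from("handle_edu", contexts=[context]):
    pass  # handle the EDU here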
@ensure_active_span("get the span context as a string.", ret={})
def active_span_context_as_string():
"""
Returns:
@@ -566,70 +687,30 @@ def extract_text_map(carrier):
# Tracing decorators
def trace(func):
def trace(func=None, opname=None):
"""
Decorator to trace a function.
Sets the operation name to that of the function.
Sets the operation name to the function's name, or to the name given
as opname. See the module's docstring for usage
examples.
"""
if opentracing is None:
return func
@wraps(func)
def _trace_inner(self, *args, **kwargs):
if opentracing is None:
return func(self, *args, **kwargs)
scope = start_active_span(func.__name__)
scope.__enter__()
try:
result = func(self, *args, **kwargs)
if isinstance(result, defer.Deferred):
def call_back(result):
scope.__exit__(None, None, None)
return result
def err_back(result):
scope.span.set_tag(tags.ERROR, True)
scope.__exit__(None, None, None)
return result
result.addCallbacks(call_back, err_back)
else:
scope.__exit__(None, None, None)
return result
except Exception as e:
scope.__exit__(type(e), None, e.__traceback__)
raise
return _trace_inner
def trace_using_operation_name(operation_name):
"""Decorator to trace a function. Explicitely sets the operation_name."""
def trace(func):
"""
Decorator to trace a function.
Sets the operation name to that of the function's.
"""
def decorator(func):
if opentracing is None:
return func
@wraps(func)
def _trace_inner(self, *args, **kwargs):
if opentracing is None:
return func(self, *args, **kwargs)
_opname = opname if opname else func.__name__
scope = start_active_span(operation_name)
@wraps(func)
def _trace_inner(*args, **kwargs):
if opentracing is None:
return func(*args, **kwargs)
scope = start_active_span(_opname)
scope.__enter__()
try:
result = func(self, *args, **kwargs)
result = func(*args, **kwargs)
if isinstance(result, defer.Deferred):
def call_back(result):
@@ -642,6 +723,7 @@ def trace_using_operation_name(operation_name):
return result
result.addCallbacks(call_back, err_back)
else:
scope.__exit__(None, None, None)
@@ -653,7 +735,10 @@ def trace_using_operation_name(operation_name):
return _trace_inner
return trace
if func:
return decorator(func)
else:
return decorator
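The dual calling convention this rework enables, mirroring the `@trace(opname=...)` usage that appears in the replication changes further down; the method names here are illustrative.
from synapse.logging.opentracing import trace
class Example(object):
    @trace
    def do_work(self, item):
        # the span's operation name defaults to the function's name, "do_work"
        return item
    @trace(opname="outgoing_replication_request")
    def send_request(self, **kwargs):
        # the span's operation name is the explicit opname
        return kwargs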
def tag_args(func):
@@ -665,76 +750,54 @@ def tag_args(func):
return func
@wraps(func)
def _tag_args_inner(self, *args, **kwargs):
def _tag_args_inner(*args, **kwargs):
argspec = inspect.getargspec(func)
for i, arg in enumerate(argspec.args[1:]):
set_tag("ARG_" + arg, args[i])
set_tag("args", args[len(argspec.args) :])
set_tag("kwargs", kwargs)
return func(self, *args, **kwargs)
return func(*args, **kwargs)
return _tag_args_inner
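A sketch of stacking `tag_args` under `trace`, so the span also records the call's arguments as tags; the names are illustrative.
from synapse.logging.opentracing import tag_args, trace
class Worker(object):
    @trace
    @tag_args
    def compute(self, room_id, limit=10):
        # tag_args sets the call's arguments as tags on the active span
        return room_id, limit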
def trace_servlet(servlet_name, func):
def trace_servlet(servlet_name, extract_context=False):
"""Decorator which traces a serlet. It starts a span with some servlet specific
tags such as the servlet_name and request information"""
if not opentracing:
return func
tags such as the servlet_name and request information
@wraps(func)
@defer.inlineCallbacks
def _trace_servlet_inner(request, *args, **kwargs):
with start_active_span(
"incoming-client-request",
tags={
Args:
servlet_name (str): The name to be used for the span's operation_name
extract_context (bool): Whether to attempt to extract the opentracing
context from the request the servlet is handling.
"""
def _trace_servlet_inner_1(func):
if not opentracing:
return func
@wraps(func)
@defer.inlineCallbacks
def _trace_servlet_inner(request, *args, **kwargs):
request_tags = {
"request_id": request.get_request_id(),
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_METHOD: request.get_method(),
tags.HTTP_URL: request.get_redacted_uri(),
tags.PEER_HOST_IPV6: request.getClientIP(),
"servlet_name": servlet_name,
},
):
result = yield defer.maybeDeferred(func, request, *args, **kwargs)
return result
}
return _trace_servlet_inner
if extract_context:
scope = start_active_span_from_request(
request, servlet_name, tags=request_tags
)
else:
scope = start_active_span(servlet_name, tags=request_tags)
with scope:
result = yield defer.maybeDeferred(func, request, *args, **kwargs)
return result
# Helper class
return _trace_servlet_inner
class _DummyTagNames(object):
"""wrapper of opentracings tags. We need to have them if we
want to reference them without opentracing around. Clearly they
should never actually show up in a trace. `set_tags` overwrites
these with the correct ones."""
INVALID_TAG = "invalid-tag"
COMPONENT = INVALID_TAG
DATABASE_INSTANCE = INVALID_TAG
DATABASE_STATEMENT = INVALID_TAG
DATABASE_TYPE = INVALID_TAG
DATABASE_USER = INVALID_TAG
ERROR = INVALID_TAG
HTTP_METHOD = INVALID_TAG
HTTP_STATUS_CODE = INVALID_TAG
HTTP_URL = INVALID_TAG
MESSAGE_BUS_DESTINATION = INVALID_TAG
PEER_ADDRESS = INVALID_TAG
PEER_HOSTNAME = INVALID_TAG
PEER_HOST_IPV4 = INVALID_TAG
PEER_HOST_IPV6 = INVALID_TAG
PEER_PORT = INVALID_TAG
PEER_SERVICE = INVALID_TAG
SAMPLING_PRIORITY = INVALID_TAG
SERVICE = INVALID_TAG
SPAN_KIND = INVALID_TAG
SPAN_KIND_CONSUMER = INVALID_TAG
SPAN_KIND_PRODUCER = INVALID_TAG
SPAN_KIND_RPC_CLIENT = INVALID_TAG
SPAN_KIND_RPC_SERVER = INVALID_TAG
tags = _DummyTagNames
return _trace_servlet_inner_1
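How the reworked factory is applied to a plain handler, matching the replication usage later in this diff; the servlet name is illustrative.
from synapse.logging.opentracing import trace_servlet
def handle_request(request):
    return 200, {}
# extract_context=True makes the wrapped servlet resume the trace carried in
# the incoming request's headers instead of starting a fresh one.
handle_request = trace_servlet("ExampleServlet", extract_context=True)(handle_request)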

View file

@@ -20,6 +20,7 @@ import os
import platform
import threading
import time
from typing import Dict, Union
import six
@@ -29,20 +30,20 @@ from prometheus_client.core import REGISTRY, GaugeMetricFamily, HistogramMetricFamily
from twisted.internet import reactor
import synapse
from synapse.metrics._exposition import (
MetricsResource,
generate_latest,
start_http_server,
)
from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
METRICS_PREFIX = "/_synapse/metrics"
running_on_pypy = platform.python_implementation() == "PyPy"
all_metrics = []
all_collectors = []
all_gauges = {}
all_gauges = {} # type: Dict[str, Union[LaterGauge, InFlightGauge, BucketCollector]]
HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
@@ -385,6 +386,16 @@ event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name"])
# finished being processed.
event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"])
# Build info of the running server.
build_info = Gauge(
"synapse_build_info", "Build information", ["pythonversion", "version", "osversion"]
)
build_info.labels(
" ".join([platform.python_implementation(), platform.python_version()]),
get_version_string(synapse),
" ".join([platform.system(), platform.release()]),
).set(1)
last_ticked = time.time()
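For reference, the new gauge renders in a scrape along these lines (label values illustrative):
synapse_build_info{pythonversion="CPython 3.7.4",version="1.4.0",osversion="Linux 4.15.0"} 1.0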

View file

@@ -36,7 +36,9 @@ from twisted.web.resource import Resource
try:
from prometheus_client.samples import Sample
except ImportError:
Sample = namedtuple("Sample", ["name", "labels", "value", "timestamp", "exemplar"])
Sample = namedtuple(
"Sample", ["name", "labels", "value", "timestamp", "exemplar"]
) # type: ignore
CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8")

View file

@@ -101,7 +101,7 @@ class ModuleApi(object):
)
user_id = yield self.register_user(localpart, displayname, emails)
_, access_token = yield self.register_device(user_id)
return (user_id, access_token)
return user_id, access_token
def register_user(self, localpart, displayname=None, emails=[]):
"""Registers a new user with given localpart and optional displayname, emails.

View file

@@ -472,11 +472,11 @@ class Notifier(object):
joined_room_ids = yield self.store.get_rooms_for_user(user.to_string())
if explicit_room_id:
if explicit_room_id in joined_room_ids:
return ([explicit_room_id], True)
return [explicit_room_id], True
if (yield self._is_world_readable(explicit_room_id)):
return ([explicit_room_id], False)
return [explicit_room_id], False
raise AuthError(403, "Non-joined access not allowed")
return (joined_room_ids, True)
return joined_room_ids, True
@defer.inlineCallbacks
def _is_world_readable(self, room_id):

View file

@@ -134,7 +134,7 @@ class BulkPushRuleEvaluator(object):
pl_event = auth_events.get(POWER_KEY)
return (pl_event.content if pl_event else {}, sender_level)
return pl_event.content if pl_event else {}, sender_level
@defer.inlineCallbacks
def action_for_event_by_user(self, event, context):

View file

@@ -22,6 +22,7 @@ from prometheus_client import Counter
from twisted.internet import defer
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
from synapse.logging import opentracing
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import PusherConfigException
@@ -194,7 +195,17 @@ class HttpPusher(object):
)
for push_action in unprocessed:
processed = yield self._process_one(push_action)
with opentracing.start_active_span(
"http-push",
tags={
"authenticated_entity": self.user_id,
"event_id": push_action["event_id"],
"app_id": self.app_id,
"app_display_name": self.app_display_name,
},
):
processed = yield self._process_one(push_action)
if processed:
http_push_processed_counter.inc()
self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC

View file

@@ -131,14 +131,11 @@ class Mailer(object):
email_address (str): Email address we're sending the password
reset to
token (str): Unique token generated by the server to verify
password reset email was received
the email was received
client_secret (str): Unique token generated by the client to
group together multiple email sending attempts
sid (str): The generated session ID
"""
if email.utils.parseaddr(email_address)[1] == "":
raise RuntimeError("Invalid 'to' email address")
link = (
self.hs.config.public_baseurl
+ "_matrix/client/unstable/password_reset/email/submit_token"
@@ -149,7 +146,34 @@ class Mailer(object):
yield self.send_email(
email_address,
"[%s] Password Reset Email" % self.hs.config.server_name,
"[%s] Password Reset" % self.hs.config.server_name,
template_vars,
)
@defer.inlineCallbacks
def send_registration_mail(self, email_address, token, client_secret, sid):
"""Send an email with a registration confirmation link to a user
Args:
email_address (str): Email address we're sending the registration
link to
token (str): Unique token generated by the server to verify
the email was received
client_secret (str): Unique token generated by the client to
group together multiple email sending attempts
sid (str): The generated session ID
"""
link = (
self.hs.config.public_baseurl
+ "_matrix/client/unstable/registration/email/submit_token"
"?token=%s&client_secret=%s&sid=%s" % (token, client_secret, sid)
)
template_vars = {"link": link}
yield self.send_email(
email_address,
"[%s] Register your Email Address" % self.hs.config.server_name,
template_vars,
)
@@ -605,25 +629,50 @@ def format_ts_filter(value, format):
return time.strftime(format, time.localtime(value / 1000))
def load_jinja2_templates(config, template_html_name, template_text_name):
"""Load the jinja2 email templates from disk
def load_jinja2_templates(
template_dir,
template_filenames,
apply_format_ts_filter=False,
apply_mxc_to_http_filter=False,
public_baseurl=None,
):
"""Loads and returns one or more jinja2 templates and applies optional filters
Args:
template_dir (str): The directory where templates are stored
template_filenames (list[str]): A list of template filenames
apply_format_ts_filter (bool): Whether to apply a template filter that formats
timestamps
apply_mxc_to_http_filter (bool): Whether to apply a template filter that converts
mxc urls to http urls
public_baseurl (str|None): The public baseurl of the server. Required for
apply_mxc_to_http_filter to be enabled
Returns:
(template_html, template_text)
A list of jinja2 templates corresponding to the given list of filenames,
with order preserved
"""
logger.info("loading email templates from '%s'", config.email_template_dir)
loader = jinja2.FileSystemLoader(config.email_template_dir)
logger.info(
"loading email templates %s from '%s'", template_filenames, template_dir
)
loader = jinja2.FileSystemLoader(template_dir)
env = jinja2.Environment(loader=loader)
env.filters["format_ts"] = format_ts_filter
env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config)
template_html = env.get_template(template_html_name)
template_text = env.get_template(template_text_name)
if apply_format_ts_filter:
env.filters["format_ts"] = format_ts_filter
return template_html, template_text
if apply_mxc_to_http_filter and public_baseurl:
env.filters["mxc_to_http"] = _create_mxc_to_http_filter(public_baseurl)
templates = []
for template_filename in template_filenames:
template = env.get_template(template_filename)
templates.append(template)
return templates
def _create_mxc_to_http_filter(config):
def _create_mxc_to_http_filter(public_baseurl):
def mxc_to_http_filter(value, width, height, resize_method="crop"):
if value[0:6] != "mxc://":
return ""
@@ -636,7 +685,7 @@ def _create_mxc_to_http_filter(config):
params = {"width": width, "height": height, "method": resize_method}
return "%s_matrix/media/v1/thumbnail/%s?%s%s" % (
config.public_baseurl,
public_baseurl,
serverAndMediaId,
urllib.parse.urlencode(params),
fragment or "",

View file

@@ -35,6 +35,7 @@ except Exception:
class PusherFactory(object):
def __init__(self, hs):
self.hs = hs
self.config = hs.config
self.pusher_types = {"http": HttpPusher}
@@ -42,12 +43,16 @@ class PusherFactory(object):
if hs.config.email_enable_notifs:
self.mailers = {} # app_name -> Mailer
templates = load_jinja2_templates(
config=hs.config,
template_html_name=hs.config.email_notif_template_html,
template_text_name=hs.config.email_notif_template_text,
self.notif_template_html, self.notif_template_text = load_jinja2_templates(
self.config.email_template_dir,
[
self.config.email_notif_template_html,
self.config.email_notif_template_text,
],
apply_format_ts_filter=True,
apply_mxc_to_http_filter=True,
public_baseurl=self.config.public_baseurl,
)
self.notif_template_html, self.notif_template_text = templates
self.pusher_types["email"] = self._create_email_pusher
@@ -78,6 +83,6 @@ class PusherFactory(object):
if "data" in pusherdict and "brand" in pusherdict["data"]:
app_name = pusherdict["data"]["brand"]
else:
app_name = self.hs.config.email_app_name
app_name = self.config.email_app_name
return app_name

View file

@@ -15,6 +15,7 @@
# limitations under the License.
import logging
from typing import Set
from pkg_resources import (
DistributionNotFound,
@@ -47,9 +48,9 @@ REQUIREMENTS = [
"idna>=2.5",
# validating SSL certs for IP addresses requires service_identity 18.1.
"service_identity>=18.1.0",
# our logcontext handling relies on the ability to cancel inlineCallbacks
# (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7.
"Twisted>=18.7.0",
# Twisted 18.9 introduces some logger improvements that the structured
# logger utilises
"Twisted>=18.9.0",
"treq>=15.1",
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
"pyopenssl>=16.0.0",
@@ -97,7 +98,7 @@ CONDITIONAL_REQUIREMENTS = {
"jwt": ["pyjwt>=1.6.4"],
}
ALL_OPTIONAL_REQUIREMENTS = set()
ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str]
for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
# Exclude systemd as it's a system-based requirement.
@@ -147,7 +148,13 @@ def check_requirements(for_feature=None):
)
except DistributionNotFound:
deps_needed.append(dependency)
errors.append("Needed %s but it was not installed" % (dependency,))
if for_feature:
errors.append(
"Needed %s for the '%s' feature but it was not installed"
% (dependency, for_feature)
)
else:
errors.append("Needed %s but it was not installed" % (dependency,))
if not for_feature:
# Check the optional dependencies are up to date. We allow them to not be
@@ -168,8 +175,8 @@ def check_requirements(for_feature=None):
pass
if deps_needed:
for e in errors:
logging.error(e)
for err in errors:
logging.error(err)
raise DependencyException(deps_needed)

View file

@@ -28,6 +28,11 @@ from synapse.api.errors import (
RequestSendFailed,
SynapseError,
)
from synapse.logging.opentracing import (
inject_active_span_byte_dict,
trace,
trace_servlet,
)
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import random_string
@@ -128,6 +133,7 @@ class ReplicationEndpoint(object):
client = hs.get_simple_http_client()
@trace(opname="outgoing_replication_request")
@defer.inlineCallbacks
def send_request(**kwargs):
data = yield cls._serialize_payload(**kwargs)
@@ -165,8 +171,10 @@ class ReplicationEndpoint(object):
# have a good idea that the request has either succeeded or failed on
# the master, and so whether we should clean up or not.
while True:
headers = {}
inject_active_span_byte_dict(headers, None, check_destination=False)
try:
result = yield request_func(uri, data)
result = yield request_func(uri, data, headers=headers)
break
except CodeMessageException as e:
if e.code != 504 or not cls.RETRY_ON_TIMEOUT:
@@ -205,7 +213,12 @@ class ReplicationEndpoint(object):
args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args)
pattern = re.compile("^/_synapse/replication/%s/%s$" % (self.NAME, args))
http_server.register_paths(method, [pattern], handler, self.__class__.__name__)
handler = trace_servlet(self.__class__.__name__, extract_context=True)(handler)
# We don't let register_paths trace this servlet using the default tracing
# options because we wish to extract the context explicitly.
http_server.register_paths(
method, [pattern], handler, self.__class__.__name__, trace=False
)
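Taken together, the two halves of a replication call now share one trace; this is a condensed restatement of the hunks above, not new code:
# Sending worker: serialise the active span into the outgoing headers
# (check_destination=False because replication peers are not in the
# federation whitelist).
headers = {}
inject_active_span_byte_dict(headers, None, check_destination=False)
result = yield request_func(uri, data, headers=headers)
# Receiving worker: the handler re-extracts that context via
# trace_servlet(..., extract_context=True), registered with trace=False so
# the default servlet tracing does not start a second, unlinked span.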
def _cached_handler(self, request, txn_id, **kwargs):
"""Called on new incoming requests when caching is enabled. Checks

View file

@@ -113,7 +113,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
event_and_contexts, backfilled
)
return (200, {})
return 200, {}
class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
@@ -156,7 +156,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
result = yield self.registry.on_edu(edu_type, origin, edu_content)
return (200, result)
return 200, result
class ReplicationGetQueryRestServlet(ReplicationEndpoint):
@@ -204,7 +204,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):
result = yield self.registry.on_query(query_type, args)
return (200, result)
return 200, result
class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
@@ -238,7 +238,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
def _handle_request(self, request, room_id):
yield self.store.clean_room_for_join(room_id)
return (200, {})
return 200, {}
def register_servlets(hs, http_server):

View file

@@ -64,7 +64,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
user_id, device_id, initial_display_name, is_guest
)
return (200, {"device_id": device_id, "access_token": access_token})
return 200, {"device_id": device_id, "access_token": access_token}
def register_servlets(hs, http_server):

View file

@@ -83,7 +83,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
remote_room_hosts, room_id, user_id, event_content
)
return (200, {})
return 200, {}
class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
@@ -153,7 +153,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
yield self.store.locally_reject_invite(user_id, room_id)
ret = {}
return (200, ret)
return 200, ret
class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
@@ -202,7 +202,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
else:
raise Exception("Unrecognized change: %r", change)
return (200, {})
return 200, {}
def register_servlets(hs, http_server):

View file

@@ -90,7 +90,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
address=content["address"],
)
return (200, {})
return 200, {}
class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
@@ -106,7 +106,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
self.registration_handler = hs.get_registration_handler()
@staticmethod
def _serialize_payload(user_id, auth_result, access_token, bind_email, bind_msisdn):
def _serialize_payload(user_id, auth_result, access_token):
"""
Args:
user_id (str): The user ID that consented
@@ -114,17 +114,8 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
registered user.
access_token (str|None): The access token of the newly logged in
device, or None if `inhibit_login` enabled.
bind_email (bool): Whether to bind the email with the identity
server
bind_msisdn (bool): Whether to bind the msisdn with the identity
server
"""
return {
"auth_result": auth_result,
"access_token": access_token,
"bind_email": bind_email,
"bind_msisdn": bind_msisdn,
}
return {"auth_result": auth_result, "access_token": access_token}
@defer.inlineCallbacks
def _handle_request(self, request, user_id):
@@ -132,18 +123,12 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
auth_result = content["auth_result"]
access_token = content["access_token"]
bind_email = content["bind_email"]
bind_msisdn = content["bind_msisdn"]
yield self.registration_handler.post_registration_actions(
user_id=user_id,
auth_result=auth_result,
access_token=access_token,
bind_email=bind_email,
bind_msisdn=bind_msisdn,
user_id=user_id, auth_result=auth_result, access_token=access_token
)
return (200, {})
return 200, {}
def register_servlets(hs, http_server):

View file

@@ -117,7 +117,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
requester, event, context, ratelimit=ratelimit, extra_users=extra_users
)
return (200, {})
return 200, {}
def register_servlets(hs, http_server):

View file

@@ -158,7 +158,7 @@ class Stream(object):
updates, current_token = yield self.get_updates_since(self.last_token)
self.last_token = current_token
return (updates, current_token)
return updates, current_token
@defer.inlineCallbacks
def get_updates_since(self, from_token):
@@ -172,14 +172,14 @@ class Stream(object):
sent over the replication steam.
"""
if from_token in ("NOW", "now"):
return ([], self.upto_token)
return [], self.upto_token
current_token = self.upto_token
from_token = int(from_token)
if from_token == current_token:
return ([], current_token)
return [], current_token
if self._LIMITED:
rows = yield self.update_function(
@@ -198,7 +198,7 @@ class Stream(object):
if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND:
raise Exception("stream %s has fallen behind" % (self.NAME))
return (updates, current_token)
return updates, current_token
def current_token(self):
"""Gets the current token of the underlying streams. Should be provided

View file

@@ -4,6 +4,6 @@
<a href="{{ link }}">{{ link }}</a>
<p>If this was not you, please disregard this email and contact your server administrator. Thank you.</p>
<p>If this was not you, <strong>do not</strong> click the link above and instead contact your server administrator. Thank you.</p>
</body>
</html>

View file

@@ -3,5 +3,5 @@ was you, please click the link below to confirm resetting your password:
{{ link }}
If this was not you, please disregard this email and contact your server
administrator. Thank you.
If this was not you, DO NOT click the link above and instead contact your
server administrator. Thank you.

View file

@@ -1,6 +1,8 @@
<html>
<head></head>
<body>
<p>{{ failure_reason }}. Your password has not been reset.</p>
<p>The request failed for the following reason: {{ failure_reason }}.</p>
<p>Your password has not been reset.</p>
</body>
</html>

View file

@@ -0,0 +1,11 @@
<html>
<body>
<p>You have asked us to register this email with a new Matrix account. If this was you, please click the link below to confirm your email address:</p>
<a href="{{ link }}">Verify Your Email Address</a>
<p>If this was not you, you can safely disregard this email.</p>
<p>Thank you.</p>
</body>
</html>

View file

@@ -0,0 +1,10 @@
Hello there,
You have asked us to register this email with a new Matrix account. If this
was you, please click the link below to confirm your email address:
{{ link }}
If this was not you, you can safely disregard this email.
Thank you.

View file

@@ -0,0 +1,6 @@
<html>
<head></head>
<body>
<p>Validation failed for the following reason: {{ failure_reason }}.</p>
</body>
</html>

Some files were not shown because too many files have changed in this diff.