Merge remote-tracking branch 'upstream/release-v1.39'

This commit is contained in:
Tulir Asokan 2021-07-20 16:33:01 +03:00
commit 9754df5623
260 changed files with 3981 additions and 2390 deletions

View file

@ -47,7 +47,7 @@ try:
except ImportError:
pass
__version__ = "1.38.0"
__version__ = "1.39.0rc1"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when

View file

@ -62,16 +62,14 @@ class Auth:
self.clock = hs.get_clock()
self.store = hs.get_datastore()
self.state = hs.get_state_handler()
self._account_validity_handler = hs.get_account_validity_handler()
self.token_cache = LruCache(
self.token_cache: LruCache[str, Tuple[str, bool]] = LruCache(
10000, "token_cache"
) # type: LruCache[str, Tuple[str, bool]]
)
self._auth_blocking = AuthBlocking(self.hs)
self._account_validity_enabled = (
hs.config.account_validity.account_validity_enabled
)
self._track_appservice_user_ips = hs.config.track_appservice_user_ips
self._macaroon_secret_key = hs.config.macaroon_secret_key
self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users
@ -187,12 +185,17 @@ class Auth:
shadow_banned = user_info.shadow_banned
# Deny the request if the user account has expired.
if self._account_validity_enabled and not allow_expired:
if await self.store.is_account_expired(
user_info.user_id, self.clock.time_msec()
if not allow_expired:
if await self._account_validity_handler.is_user_expired(
user_info.user_id
):
# Raise the error if either an account validity module has determined
# the account has expired, or the legacy account validity
# implementation is enabled and determined the account has expired
raise AuthError(
403, "User account has expired", errcode=Codes.EXPIRED_ACCOUNT
403,
"User account has expired",
errcode=Codes.EXPIRED_ACCOUNT,
)
device_id = user_info.device_id
@ -240,6 +243,37 @@ class Auth:
except KeyError:
raise MissingClientTokenError()
async def validate_appservice_can_control_user_id(
self, app_service: ApplicationService, user_id: str
):
"""Validates that the app service is allowed to control
the given user.
Args:
app_service: The app service that controls the user
user_id: The author MXID that the app service is controlling
Raises:
AuthError: If the application service is not allowed to control the user
(user namespace regex does not match, wrong homeserver, etc)
or if the user has not been registered yet.
"""
# It's ok if the app service is trying to use the sender from their registration
if app_service.sender == user_id:
pass
# Check to make sure the app service is allowed to control the user
elif not app_service.is_interested_in_user(user_id):
raise AuthError(
403,
"Application service cannot masquerade as this user (%s)." % user_id,
)
# Check to make sure the user is already registered on the homeserver
elif not (await self.store.get_user_by_id(user_id)):
raise AuthError(
403, "Application service has not registered this user (%s)" % user_id
)
async def _get_appservice_user_id(
self, request: Request
) -> Tuple[Optional[str], Optional[ApplicationService]]:
@ -261,13 +295,11 @@ class Auth:
return app_service.sender, app_service
user_id = request.args[b"user_id"][0].decode("utf8")
await self.validate_appservice_can_control_user_id(app_service, user_id)
if app_service.sender == user_id:
return app_service.sender, app_service
if not app_service.is_interested_in_user(user_id):
raise AuthError(403, "Application service cannot masquerade as this user.")
if not (await self.store.get_user_by_id(user_id)):
raise AuthError(403, "Application service has not registered this user")
return user_id, app_service
async def get_user_by_access_token(

View file

@ -118,7 +118,7 @@ class RedirectException(CodeMessageException):
super().__init__(code=http_code, msg=msg)
self.location = location
self.cookies = [] # type: List[bytes]
self.cookies: List[bytes] = []
class SynapseError(CodeMessageException):
@ -160,7 +160,7 @@ class ProxiedRequestError(SynapseError):
):
super().__init__(code, msg, errcode)
if additional_fields is None:
self._additional_fields = {} # type: Dict
self._additional_fields: Dict = {}
else:
self._additional_fields = dict(additional_fields)

View file

@ -289,7 +289,7 @@ class Filter:
room_id = None
ev_type = "m.presence"
contains_url = False
labels = [] # type: List[str]
labels: List[str] = []
else:
sender = event.get("sender", None)
if not sender:

View file

@ -46,9 +46,7 @@ class Ratelimiter:
# * How many times an action has occurred since a point in time
# * The point in time
# * The rate_hz of this particular entry. This can vary per request
self.actions = (
OrderedDict()
) # type: OrderedDict[Hashable, Tuple[float, int, float]]
self.actions: OrderedDict[Hashable, Tuple[float, int, float]] = OrderedDict()
async def can_do_action(
self,

View file

@ -195,7 +195,7 @@ class RoomVersions:
)
KNOWN_ROOM_VERSIONS = {
KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
v.identifier: v
for v in (
RoomVersions.V1,
@ -209,4 +209,4 @@ KNOWN_ROOM_VERSIONS = {
RoomVersions.V7,
)
# Note that we do not include MSC2043 here unless it is enabled in the config.
} # type: Dict[str, RoomVersion]
}

View file

@ -38,6 +38,7 @@ from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.logging.context import PreserveLoggingContext
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
@ -368,6 +369,7 @@ async def start(hs: "HomeServer"):
module(config=config, api=module_api)
load_legacy_spam_checkers(hs)
load_legacy_third_party_event_rules(hs)
# If we've configured an expiry time for caches, start the background job now.
setup_expire_lru_cache_entries(hs)

View file

@ -270,7 +270,7 @@ class GenericWorkerServer(HomeServer):
site_tag = port
# We always include a health resource.
resources = {"/health": HealthResource()} # type: Dict[str, IResource]
resources: Dict[str, IResource] = {"/health": HealthResource()}
for res in listener_config.http_options.resources:
for name in res.names:
@ -395,10 +395,8 @@ class GenericWorkerServer(HomeServer):
elif listener.type == "metrics":
if not self.config.enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
"Metrics listener configured, but "
"enable_metrics is not True!"
)
else:
_base.listen_metrics(listener.bind_addresses, listener.port)

View file

@ -305,10 +305,8 @@ class SynapseHomeServer(HomeServer):
elif listener.type == "metrics":
if not self.config.enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
"Metrics listener configured, but "
"enable_metrics is not True!"
)
else:
_base.listen_metrics(listener.bind_addresses, listener.port)

View file

@ -71,6 +71,8 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
# General statistics
#
store = hs.get_datastore()
stats["homeserver"] = hs.config.server_name
stats["server_context"] = hs.config.server_context
stats["timestamp"] = now
@ -79,34 +81,38 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
stats["python_version"] = "{}.{}.{}".format(
version.major, version.minor, version.micro
)
stats["total_users"] = await hs.get_datastore().count_all_users()
stats["total_users"] = await store.count_all_users()
total_nonbridged_users = await hs.get_datastore().count_nonbridged_users()
total_nonbridged_users = await store.count_nonbridged_users()
stats["total_nonbridged_users"] = total_nonbridged_users
daily_user_type_results = await hs.get_datastore().count_daily_user_type()
daily_user_type_results = await store.count_daily_user_type()
for name, count in daily_user_type_results.items():
stats["daily_user_type_" + name] = count
room_count = await hs.get_datastore().get_room_count()
room_count = await store.get_room_count()
stats["total_room_count"] = room_count
stats["daily_active_users"] = await hs.get_datastore().count_daily_users()
stats["monthly_active_users"] = await hs.get_datastore().count_monthly_users()
daily_active_e2ee_rooms = await hs.get_datastore().count_daily_active_e2ee_rooms()
stats["daily_active_users"] = await store.count_daily_users()
stats["monthly_active_users"] = await store.count_monthly_users()
daily_active_e2ee_rooms = await store.count_daily_active_e2ee_rooms()
stats["daily_active_e2ee_rooms"] = daily_active_e2ee_rooms
stats["daily_e2ee_messages"] = await hs.get_datastore().count_daily_e2ee_messages()
daily_sent_e2ee_messages = await hs.get_datastore().count_daily_sent_e2ee_messages()
stats["daily_e2ee_messages"] = await store.count_daily_e2ee_messages()
daily_sent_e2ee_messages = await store.count_daily_sent_e2ee_messages()
stats["daily_sent_e2ee_messages"] = daily_sent_e2ee_messages
stats["daily_active_rooms"] = await hs.get_datastore().count_daily_active_rooms()
stats["daily_messages"] = await hs.get_datastore().count_daily_messages()
daily_sent_messages = await hs.get_datastore().count_daily_sent_messages()
stats["daily_active_rooms"] = await store.count_daily_active_rooms()
stats["daily_messages"] = await store.count_daily_messages()
daily_sent_messages = await store.count_daily_sent_messages()
stats["daily_sent_messages"] = daily_sent_messages
r30_results = await hs.get_datastore().count_r30_users()
r30_results = await store.count_r30_users()
for name, count in r30_results.items():
stats["r30_users_" + name] = count
r30v2_results = await store.count_r30_users()
for name, count in r30v2_results.items():
stats["r30v2_users_" + name] = count
stats["cache_factor"] = hs.config.caches.global_factor
stats["event_cache_size"] = hs.config.caches.event_cache_size
@ -115,8 +121,8 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
#
# This only reports info about the *main* database.
stats["database_engine"] = hs.get_datastore().db_pool.engine.module.__name__
stats["database_server_version"] = hs.get_datastore().db_pool.engine.server_version
stats["database_engine"] = store.db_pool.engine.module.__name__
stats["database_server_version"] = store.db_pool.engine.server_version
#
# Logging configuration

View file

@ -88,9 +88,9 @@ class ApplicationServiceApi(SimpleHttpClient):
super().__init__(hs)
self.clock = hs.get_clock()
self.protocol_meta_cache = ResponseCache(
self.protocol_meta_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
) # type: ResponseCache[Tuple[str, str]]
)
async def query_user(self, service, user_id):
if service.url is None:

View file

@ -18,6 +18,21 @@ class AccountValidityConfig(Config):
section = "account_validity"
def read_config(self, config, **kwargs):
"""Parses the old account validity config. The config format looks like this:
account_validity:
enabled: true
period: 6w
renew_at: 1w
renew_email_subject: "Renew your %(app)s account"
template_dir: "res/templates"
account_renewed_html_path: "account_renewed.html"
invalid_token_html_path: "invalid_token.html"
We expect admins to use modules for this feature (which is why it doesn't appear
in the sample config file), but we want to keep support for it around for a bit
for backwards compatibility.
"""
account_validity_config = config.get("account_validity") or {}
self.account_validity_enabled = account_validity_config.get("enabled", False)
self.account_validity_renew_by_email_enabled = (
@ -75,90 +90,3 @@ class AccountValidityConfig(Config):
],
account_validity_template_dir,
)
def generate_config_section(self, **kwargs):
return """\
## Account Validity ##
# Optional account validity configuration. This allows for accounts to be denied
# any request after a given period.
#
# Once this feature is enabled, Synapse will look for registered users without an
# expiration date at startup and will add one to every account it found using the
# current settings at that time.
# This means that, if a validity period is set, and Synapse is restarted (it will
# then derive an expiration date from the current validity period), and some time
# after that the validity period changes and Synapse is restarted, the users'
# expiration dates won't be updated unless their account is manually renewed. This
# date will be randomly selected within a range [now + period - d ; now + period],
# where d is equal to 10% of the validity period.
#
account_validity:
# The account validity feature is disabled by default. Uncomment the
# following line to enable it.
#
#enabled: true
# The period after which an account is valid after its registration. When
# renewing the account, its validity period will be extended by this amount
# of time. This parameter is required when using the account validity
# feature.
#
#period: 6w
# The amount of time before an account's expiry date at which Synapse will
# send an email to the account's email address with a renewal link. By
# default, no such emails are sent.
#
# If you enable this setting, you will also need to fill out the 'email' and
# 'public_baseurl' configuration sections.
#
#renew_at: 1w
# The subject of the email sent out with the renewal link. '%(app)s' can be
# used as a placeholder for the 'app_name' parameter from the 'email'
# section.
#
# Note that the placeholder must be written '%(app)s', including the
# trailing 's'.
#
# If this is not set, a default value is used.
#
#renew_email_subject: "Renew your %(app)s account"
# Directory in which Synapse will try to find templates for the HTML files to
# serve to the user when trying to renew an account. If not set, default
# templates from within the Synapse package will be used.
#
# The currently available templates are:
#
# * account_renewed.html: Displayed to the user after they have successfully
# renewed their account.
#
# * account_previously_renewed.html: Displayed to the user if they attempt to
# renew their account with a token that is valid, but that has already
# been used. In this case the account is not renewed again.
#
# * invalid_token.html: Displayed to the user when they try to renew an account
# with an unknown or invalid renewal token.
#
# See https://github.com/matrix-org/synapse/tree/master/synapse/res/templates for
# default template contents.
#
# The file name of some of these templates can be configured below for legacy
# reasons.
#
#template_dir: "res/templates"
# A custom file name for the 'account_renewed.html' template.
#
# If not set, the file is assumed to be named "account_renewed.html".
#
#account_renewed_html_path: "account_renewed.html"
# A custom file name for the 'invalid_token.html' template.
#
# If not set, the file is assumed to be named "invalid_token.html".
#
#invalid_token_html_path: "invalid_token.html"
"""

View file

@ -57,14 +57,14 @@ def load_appservices(hostname, config_files):
return []
# Dicts of value -> filename
seen_as_tokens = {} # type: Dict[str, str]
seen_ids = {} # type: Dict[str, str]
seen_as_tokens: Dict[str, str] = {}
seen_ids: Dict[str, str] = {}
appservices = []
for config_file in config_files:
try:
with open(config_file, "r") as f:
with open(config_file) as f:
appservice = _load_appservice(hostname, yaml.safe_load(f), config_file)
if appservice.id in seen_ids:
raise ConfigError(

View file

@ -25,7 +25,7 @@ from ._base import Config, ConfigError
_CACHE_PREFIX = "SYNAPSE_CACHE_FACTOR"
# Map from canonicalised cache name to cache.
_CACHES = {} # type: Dict[str, Callable[[float], None]]
_CACHES: Dict[str, Callable[[float], None]] = {}
# a lock on the contents of _CACHES
_CACHES_LOCK = threading.Lock()
@ -157,7 +157,7 @@ class CacheConfig(Config):
self.event_cache_size = self.parse_size(
config.get("event_cache_size", _DEFAULT_EVENT_CACHE_SIZE)
)
self.cache_factors = {} # type: Dict[str, float]
self.cache_factors: Dict[str, float] = {}
cache_config = config.get("caches") or {}
self.global_factor = cache_config.get(

View file

@ -134,9 +134,9 @@ class EmailConfig(Config):
# trusted_third_party_id_servers does not contain a scheme whereas
# account_threepid_delegate_email is expected to. Presume https
self.account_threepid_delegate_email = (
self.account_threepid_delegate_email: Optional[str] = (
"https://" + first_trusted_identity_server
) # type: Optional[str]
)
self.using_identity_server_from_trusted_list = True
else:
raise ConfigError(

View file

@ -25,10 +25,10 @@ class ExperimentalConfig(Config):
experimental = config.get("experimental_features") or {}
# MSC2858 (multiple SSO identity providers)
self.msc2858_enabled = experimental.get("msc2858_enabled", False) # type: bool
self.msc2858_enabled: bool = experimental.get("msc2858_enabled", False)
# MSC3026 (busy presence state)
self.msc3026_enabled = experimental.get("msc3026_enabled", False) # type: bool
self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
# MSC2716 (backfill existing history)
self.msc2716_enabled = experimental.get("msc2716_enabled", False) # type: bool
self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)

View file

@ -22,7 +22,7 @@ class FederationConfig(Config):
def read_config(self, config, **kwargs):
# FIXME: federation_domain_whitelist needs sytests
self.federation_domain_whitelist = None # type: Optional[dict]
self.federation_domain_whitelist: Optional[dict] = None
federation_domain_whitelist = config.get("federation_domain_whitelist", None)
if federation_domain_whitelist is not None:

View file

@ -460,7 +460,7 @@ def _parse_oidc_config_dict(
) from e
client_secret_jwt_key_config = oidc_config.get("client_secret_jwt_key")
client_secret_jwt_key = None # type: Optional[OidcProviderClientSecretJwtKey]
client_secret_jwt_key: Optional[OidcProviderClientSecretJwtKey] = None
if client_secret_jwt_key_config is not None:
keyfile = client_secret_jwt_key_config.get("key_file")
if keyfile:

View file

@ -25,7 +25,7 @@ class PasswordAuthProviderConfig(Config):
section = "authproviders"
def read_config(self, config, **kwargs):
self.password_providers = [] # type: List[Any]
self.password_providers: List[Any] = []
providers = []
# We want to be backwards compatible with the old `ldap_config`

View file

@ -62,7 +62,7 @@ def parse_thumbnail_requirements(thumbnail_sizes):
Dictionary mapping from media type string to list of
ThumbnailRequirement tuples.
"""
requirements = {} # type: Dict[str, List]
requirements: Dict[str, List] = {}
for size in thumbnail_sizes:
width = size["width"]
height = size["height"]
@ -142,7 +142,7 @@ class ContentRepositoryConfig(Config):
#
# We don't create the storage providers here as not all workers need
# them to be started.
self.media_storage_providers = [] # type: List[tuple]
self.media_storage_providers: List[tuple] = []
for i, provider_config in enumerate(storage_providers):
# We special case the module "file_system" so as not to need to

View file

@ -505,7 +505,7 @@ class ServerConfig(Config):
" greater than 'allowed_lifetime_max'"
)
self.retention_purge_jobs = [] # type: List[Dict[str, Optional[int]]]
self.retention_purge_jobs: List[Dict[str, Optional[int]]] = []
for purge_job_config in retention_config.get("purge_jobs", []):
interval_config = purge_job_config.get("interval")
@ -688,23 +688,21 @@ class ServerConfig(Config):
# not included in the sample configuration file on purpose as it's a temporary
# hack, so that some users can trial the new defaults without impacting every
# user on the homeserver.
users_new_default_push_rules = (
users_new_default_push_rules: list = (
config.get("users_new_default_push_rules") or []
) # type: list
)
if not isinstance(users_new_default_push_rules, list):
raise ConfigError("'users_new_default_push_rules' must be a list")
# Turn the list into a set to improve lookup speed.
self.users_new_default_push_rules = set(
users_new_default_push_rules
) # type: set
self.users_new_default_push_rules: set = set(users_new_default_push_rules)
# Whitelist of domain names that given next_link parameters must have
next_link_domain_whitelist = config.get(
next_link_domain_whitelist: Optional[List[str]] = config.get(
"next_link_domain_whitelist"
) # type: Optional[List[str]]
)
self.next_link_domain_whitelist = None # type: Optional[Set[str]]
self.next_link_domain_whitelist: Optional[Set[str]] = None
if next_link_domain_whitelist is not None:
if not isinstance(next_link_domain_whitelist, list):
raise ConfigError("'next_link_domain_whitelist' must be a list")

View file

@ -34,7 +34,7 @@ class SpamCheckerConfig(Config):
section = "spamchecker"
def read_config(self, config, **kwargs):
self.spam_checkers = [] # type: List[Tuple[Any, Dict]]
self.spam_checkers: List[Tuple[Any, Dict]] = []
spam_checkers = config.get("spam_checker") or []
if isinstance(spam_checkers, dict):

View file

@ -39,7 +39,7 @@ class SSOConfig(Config):
section = "sso"
def read_config(self, config, **kwargs):
sso_config = config.get("sso") or {} # type: Dict[str, Any]
sso_config: Dict[str, Any] = config.get("sso") or {}
# The sso-specific template_dir
self.sso_template_dir = sso_config.get("template_dir")

View file

@ -38,13 +38,9 @@ class StatsConfig(Config):
def read_config(self, config, **kwargs):
self.stats_enabled = True
self.stats_bucket_size = 86400 * 1000
stats_config = config.get("stats", None)
if stats_config:
self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
self.stats_bucket_size = self.parse_duration(
stats_config.get("bucket_size", "1d")
)
if not self.stats_enabled:
logger.warning(ROOM_STATS_DISABLED_WARN)
@ -59,9 +55,4 @@ class StatsConfig(Config):
# correctly.
#
#enabled: false
# The size of each timeslice in the room_stats_historical and
# user_stats_historical tables, as a time period. Defaults to "1d".
#
#bucket_size: 1h
"""

View file

@ -28,18 +28,3 @@ class ThirdPartyRulesConfig(Config):
self.third_party_event_rules = load_module(
provider, ("third_party_event_rules",)
)
def generate_config_section(self, **kwargs):
return """\
# Server admins can define a Python module that implements extra rules for
# allowing or denying incoming events. In order to work, this module needs to
# override the methods defined in synapse/events/third_party_rules.py.
#
# This feature is designed to be used in closed federations only, where each
# participating server enforces the same rules.
#
#third_party_event_rules:
# module: "my_custom_project.SuperRulesSet"
# config:
# example_option: 'things'
"""

View file

@ -66,10 +66,8 @@ class TlsConfig(Config):
if self.federation_client_minimum_tls_version == "1.3":
if getattr(SSL, "OP_NO_TLSv1_3", None) is None:
raise ConfigError(
(
"federation_client_minimum_tls_version cannot be 1.3, "
"your OpenSSL does not support it"
)
"federation_client_minimum_tls_version cannot be 1.3, "
"your OpenSSL does not support it"
)
# Whitelist of domains to not verify certificates for
@ -80,7 +78,7 @@ class TlsConfig(Config):
fed_whitelist_entries = []
# Support globs (*) in whitelist values
self.federation_certificate_verification_whitelist = [] # type: List[Pattern]
self.federation_certificate_verification_whitelist: List[Pattern] = []
for entry in fed_whitelist_entries:
try:
entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii"))
@ -132,8 +130,8 @@ class TlsConfig(Config):
"use_insecure_ssl_client_just_for_testing_do_not_use"
)
self.tls_certificate = None # type: Optional[crypto.X509]
self.tls_private_key = None # type: Optional[crypto.PKey]
self.tls_certificate: Optional[crypto.X509] = None
self.tls_private_key: Optional[crypto.PKey] = None
def is_disk_cert_valid(self, allow_self_signed=True):
"""

View file

@ -170,11 +170,13 @@ class Keyring:
)
self._key_fetchers = key_fetchers
self._server_queue = BatchingQueue(
self._server_queue: BatchingQueue[
_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]
] = BatchingQueue(
"keyring_server",
clock=hs.get_clock(),
process_batch_callback=self._inner_fetch_key_requests,
) # type: BatchingQueue[_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]]
)
async def verify_json_for_server(
self,
@ -330,7 +332,7 @@ class Keyring:
# First we need to deduplicate requests for the same key. We do this by
# taking the *maximum* requested `minimum_valid_until_ts` for each pair
# of server name/key ID.
server_to_key_to_ts = {} # type: Dict[str, Dict[str, int]]
server_to_key_to_ts: Dict[str, Dict[str, int]] = {}
for request in requests:
by_server = server_to_key_to_ts.setdefault(request.server_name, {})
for key_id in request.key_ids:
@ -355,7 +357,7 @@ class Keyring:
# We now convert the returned list of results into a map from server
# name to key ID to FetchKeyResult, to return.
to_return = {} # type: Dict[str, Dict[str, FetchKeyResult]]
to_return: Dict[str, Dict[str, FetchKeyResult]] = {}
for (request, results) in zip(deduped_requests, results_per_request):
to_return_by_server = to_return.setdefault(request.server_name, {})
for key_id, key_result in results.items():
@ -455,7 +457,7 @@ class StoreKeyFetcher(KeyFetcher):
)
res = await self.store.get_server_verify_keys(key_ids_to_fetch)
keys = {} # type: Dict[str, Dict[str, FetchKeyResult]]
keys: Dict[str, Dict[str, FetchKeyResult]] = {}
for (server_name, key_id), key in res.items():
keys.setdefault(server_name, {})[key_id] = key
return keys
@ -603,7 +605,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
).addErrback(unwrapFirstError)
)
union_of_keys = {} # type: Dict[str, Dict[str, FetchKeyResult]]
union_of_keys: Dict[str, Dict[str, FetchKeyResult]] = {}
for result in results:
for server_name, keys in result.items():
union_of_keys.setdefault(server_name, {}).update(keys)
@ -656,8 +658,8 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
except HttpResponseException as e:
raise KeyLookupError("Remote server returned an error: %s" % (e,))
keys = {} # type: Dict[str, Dict[str, FetchKeyResult]]
added_keys = [] # type: List[Tuple[str, str, FetchKeyResult]]
keys: Dict[str, Dict[str, FetchKeyResult]] = {}
added_keys: List[Tuple[str, str, FetchKeyResult]] = []
time_now_ms = self.clock.time_msec()
@ -805,7 +807,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
Raises:
KeyLookupError if there was a problem making the lookup
"""
keys = {} # type: Dict[str, FetchKeyResult]
keys: Dict[str, FetchKeyResult] = {}
for requested_key_id in key_ids:
# we may have found this key as a side-effect of asking for another.

View file

@ -48,6 +48,9 @@ def check(
room_version_obj: the version of the room
event: the event being checked.
auth_events: the existing room state.
do_sig_check: True if it should be verified that the sending server
signed the event.
do_size_check: True if the size of the event fields should be verified.
Raises:
AuthError if the checks fail
@ -528,7 +531,7 @@ def _check_power_levels(
user_level = get_user_power_level(event.user_id, auth_events)
# Check other levels:
levels_to_check = [
levels_to_check: List[Tuple[str, Optional[str]]] = [
("users_default", None),
("events_default", None),
("state_default", None),
@ -536,7 +539,7 @@ def _check_power_levels(
("redact", None),
("kick", None),
("invite", None),
] # type: List[Tuple[str, Optional[str]]]
]
old_list = current_state.content.get("users", {})
for user in set(list(old_list) + list(user_list)):
@ -566,12 +569,12 @@ def _check_power_levels(
new_loc = new_loc.get(dir, {})
if level_to_check in old_loc:
old_level = int(old_loc[level_to_check]) # type: Optional[int]
old_level: Optional[int] = int(old_loc[level_to_check])
else:
old_level = None
if level_to_check in new_loc:
new_level = int(new_loc[level_to_check]) # type: Optional[int]
new_level: Optional[int] = int(new_loc[level_to_check])
else:
new_level = None

View file

@ -105,28 +105,28 @@ class _EventInternalMetadata:
self._dict = dict(internal_metadata_dict)
# the stream ordering of this event. None, until it has been persisted.
self.stream_ordering = None # type: Optional[int]
self.stream_ordering: Optional[int] = None
# whether this event is an outlier (ie, whether we have the state at that point
# in the DAG)
self.outlier = False
out_of_band_membership = DictProperty("out_of_band_membership") # type: bool
send_on_behalf_of = DictProperty("send_on_behalf_of") # type: str
recheck_redaction = DictProperty("recheck_redaction") # type: bool
soft_failed = DictProperty("soft_failed") # type: bool
proactively_send = DictProperty("proactively_send") # type: bool
redacted = DictProperty("redacted") # type: bool
txn_id = DictProperty("txn_id") # type: str
token_id = DictProperty("token_id") # type: int
historical = DictProperty("historical") # type: bool
out_of_band_membership: bool = DictProperty("out_of_band_membership")
send_on_behalf_of: str = DictProperty("send_on_behalf_of")
recheck_redaction: bool = DictProperty("recheck_redaction")
soft_failed: bool = DictProperty("soft_failed")
proactively_send: bool = DictProperty("proactively_send")
redacted: bool = DictProperty("redacted")
txn_id: str = DictProperty("txn_id")
token_id: int = DictProperty("token_id")
historical: bool = DictProperty("historical")
# XXX: These are set by StreamWorkerStore._set_before_and_after.
# I'm pretty sure that these are never persisted to the database, so shouldn't
# be here
before = DictProperty("before") # type: RoomStreamToken
after = DictProperty("after") # type: RoomStreamToken
order = DictProperty("order") # type: Tuple[int, int]
before: RoomStreamToken = DictProperty("before")
after: RoomStreamToken = DictProperty("after")
order: Tuple[int, int] = DictProperty("order")
def get_dict(self) -> JsonDict:
return dict(self._dict)
@ -291,6 +291,20 @@ class EventBase(metaclass=abc.ABCMeta):
return pdu_json
def get_templated_pdu_json(self) -> JsonDict:
"""
Return a JSON object suitable for a templated event, as used in the
make_{join,leave,knock} workflow.
"""
# By using _dict directly we don't pull in signatures/unsigned.
template_json = dict(self._dict)
# The hashes (similar to the signature) need to be recalculated by the
# joining/leaving/knocking server after (potentially) modifying the
# event.
template_json.pop("hashes")
return template_json
def __set__(self, instance, value):
raise AttributeError("Unrecognized attribute %s" % (instance,))

View file

@ -132,12 +132,12 @@ class EventBuilder:
format_version = self.room_version.event_format
if format_version == EventFormatVersions.V1:
# The types of auth/prev events changes between event versions.
auth_events = await self._store.add_event_hashes(
auth_event_ids
) # type: Union[List[str], List[Tuple[str, Dict[str, str]]]]
prev_events = await self._store.add_event_hashes(
prev_event_ids
) # type: Union[List[str], List[Tuple[str, Dict[str, str]]]]
auth_events: Union[
List[str], List[Tuple[str, Dict[str, str]]]
] = await self._store.add_event_hashes(auth_event_ids)
prev_events: Union[
List[str], List[Tuple[str, Dict[str, str]]]
] = await self._store.add_event_hashes(prev_event_ids)
else:
auth_events = auth_event_ids
prev_events = prev_event_ids
@ -156,7 +156,7 @@ class EventBuilder:
# the db)
depth = min(depth, MAX_DEPTH)
event_dict = {
event_dict: Dict[str, Any] = {
"auth_events": auth_events,
"prev_events": prev_events,
"type": self.type,
@ -166,7 +166,7 @@ class EventBuilder:
"unsigned": self.unsigned,
"depth": depth,
"prev_state": [],
} # type: Dict[str, Any]
}
if self.is_state():
event_dict["state_key"] = self._state_key

View file

@ -76,7 +76,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
"""Wrapper that loads spam checkers configured using the old configuration, and
registers the spam checker hooks they implement.
"""
spam_checkers = [] # type: List[Any]
spam_checkers: List[Any] = []
api = hs.get_module_api()
for module, config in hs.config.spam_checkers:
# Older spam checkers don't accept the `api` argument, so we
@ -239,7 +239,7 @@ class SpamChecker:
will be used as the error message returned to the user.
"""
for callback in self._check_event_for_spam_callbacks:
res = await callback(event) # type: Union[bool, str]
res: Union[bool, str] = await callback(event)
if res:
return res

View file

@ -11,16 +11,124 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple
from typing import TYPE_CHECKING, Union
from synapse.api.errors import SynapseError
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.types import Requester, StateMap
from synapse.util.async_helpers import maybe_awaitable
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
CHECK_EVENT_ALLOWED_CALLBACK = Callable[
[EventBase, StateMap[EventBase]], Awaitable[Tuple[bool, Optional[dict]]]
]
ON_CREATE_ROOM_CALLBACK = Callable[[Requester, dict, bool], Awaitable]
CHECK_THREEPID_CAN_BE_INVITED_CALLBACK = Callable[
[str, str, StateMap[EventBase]], Awaitable[bool]
]
CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[
[str, StateMap[EventBase], str], Awaitable[bool]
]
def load_legacy_third_party_event_rules(hs: "HomeServer"):
"""Wrapper that loads a third party event rules module configured using the old
configuration, and registers the hooks they implement.
"""
if hs.config.third_party_event_rules is None:
return
module, config = hs.config.third_party_event_rules
api = hs.get_module_api()
third_party_rules = module(config=config, module_api=api)
# The known hooks. If a module implements a method which name appears in this set,
# we'll want to register it.
third_party_event_rules_methods = {
"check_event_allowed",
"on_create_room",
"check_threepid_can_be_invited",
"check_visibility_can_be_modified",
}
def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.
if f is None:
return None
# We return a separate wrapper for these methods because, in order to wrap them
# correctly, we need to await its result. Therefore it doesn't make a lot of
# sense to make it go through the run() wrapper.
if f.__name__ == "check_event_allowed":
# We need to wrap check_event_allowed because its old form would return either
# a boolean or a dict, but now we want to return the dict separately from the
# boolean.
async def wrap_check_event_allowed(
event: EventBase,
state_events: StateMap[EventBase],
) -> Tuple[bool, Optional[dict]]:
# We've already made sure f is not None above, but mypy doesn't do well
# across function boundaries so we need to tell it f is definitely not
# None.
assert f is not None
res = await f(event, state_events)
if isinstance(res, dict):
return True, res
else:
return res, None
return wrap_check_event_allowed
if f.__name__ == "on_create_room":
# We need to wrap on_create_room because its old form would return a boolean
# if the room creation is denied, but now we just want it to raise an
# exception.
async def wrap_on_create_room(
requester: Requester, config: dict, is_requester_admin: bool
) -> None:
# We've already made sure f is not None above, but mypy doesn't do well
# across function boundaries so we need to tell it f is definitely not
# None.
assert f is not None
res = await f(requester, config, is_requester_admin)
if res is False:
raise SynapseError(
403,
"Room creation forbidden with these parameters",
)
return wrap_on_create_room
def run(*args, **kwargs):
# mypy doesn't do well across function boundaries so we need to tell it
# f is definitely not None.
assert f is not None
return maybe_awaitable(f(*args, **kwargs))
return run
# Register the hooks through the module API.
hooks = {
hook: async_wrapper(getattr(third_party_rules, hook, None))
for hook in third_party_event_rules_methods
}
api.register_third_party_rules_callbacks(**hooks)
class ThirdPartyEventRules:
"""Allows server admins to provide a Python module implementing an extra
@ -35,36 +143,65 @@ class ThirdPartyEventRules:
self.store = hs.get_datastore()
module = None
config = None
if hs.config.third_party_event_rules:
module, config = hs.config.third_party_event_rules
self._check_event_allowed_callbacks: List[CHECK_EVENT_ALLOWED_CALLBACK] = []
self._on_create_room_callbacks: List[ON_CREATE_ROOM_CALLBACK] = []
self._check_threepid_can_be_invited_callbacks: List[
CHECK_THREEPID_CAN_BE_INVITED_CALLBACK
] = []
self._check_visibility_can_be_modified_callbacks: List[
CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
] = []
if module is not None:
self.third_party_rules = module(
config=config,
module_api=hs.get_module_api(),
def register_third_party_rules_callbacks(
self,
check_event_allowed: Optional[CHECK_EVENT_ALLOWED_CALLBACK] = None,
on_create_room: Optional[ON_CREATE_ROOM_CALLBACK] = None,
check_threepid_can_be_invited: Optional[
CHECK_THREEPID_CAN_BE_INVITED_CALLBACK
] = None,
check_visibility_can_be_modified: Optional[
CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
] = None,
):
"""Register callbacks from modules for each hook."""
if check_event_allowed is not None:
self._check_event_allowed_callbacks.append(check_event_allowed)
if on_create_room is not None:
self._on_create_room_callbacks.append(on_create_room)
if check_threepid_can_be_invited is not None:
self._check_threepid_can_be_invited_callbacks.append(
check_threepid_can_be_invited,
)
if check_visibility_can_be_modified is not None:
self._check_visibility_can_be_modified_callbacks.append(
check_visibility_can_be_modified,
)
async def check_event_allowed(
self, event: EventBase, context: EventContext
) -> Union[bool, dict]:
) -> Tuple[bool, Optional[dict]]:
"""Check if a provided event should be allowed in the given context.
The module can return:
* True: the event is allowed.
* False: the event is not allowed, and should be rejected with M_FORBIDDEN.
* a dict: replacement event data.
If the event is allowed, the module can also return a dictionary to use as a
replacement for the event.
Args:
event: The event to be checked.
context: The context of the event.
Returns:
The result from the ThirdPartyRules module, as above
The result from the ThirdPartyRules module, as above.
"""
if self.third_party_rules is None:
return True
# Bail out early without hitting the store if we don't have any callbacks to run.
if len(self._check_event_allowed_callbacks) == 0:
return True, None
prev_state_ids = await context.get_prev_state_ids()
@ -77,29 +214,46 @@ class ThirdPartyEventRules:
# the hashes and signatures.
event.freeze()
return await self.third_party_rules.check_event_allowed(event, state_events)
for callback in self._check_event_allowed_callbacks:
try:
res, replacement_data = await callback(event, state_events)
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
continue
# Return if the event shouldn't be allowed or if the module came up with a
# replacement dict for the event.
if res is False:
return res, None
elif isinstance(replacement_data, dict):
return True, replacement_data
return True, None
async def on_create_room(
self, requester: Requester, config: dict, is_requester_admin: bool
) -> bool:
"""Intercept requests to create room to allow, deny or update the
request config.
) -> None:
"""Intercept requests to create room to maybe deny it (via an exception) or
update the request config.
Args:
requester
config: The creation config from the client.
is_requester_admin: If the requester is an admin
Returns:
Whether room creation is allowed or denied.
"""
for callback in self._on_create_room_callbacks:
try:
await callback(requester, config, is_requester_admin)
except Exception as e:
# Don't silence the errors raised by this callback since we expect it to
# raise an exception to deny the creation of the room; instead make sure
# it's a SynapseError we can send to clients.
if not isinstance(e, SynapseError):
e = SynapseError(
403, "Room creation forbidden with these parameters"
)
if self.third_party_rules is None:
return True
return await self.third_party_rules.on_create_room(
requester, config, is_requester_admin
)
raise e
async def check_threepid_can_be_invited(
self, medium: str, address: str, room_id: str
@ -114,15 +268,20 @@ class ThirdPartyEventRules:
Returns:
True if the 3PID can be invited, False if not.
"""
if self.third_party_rules is None:
# Bail out early without hitting the store if we don't have any callbacks to run.
if len(self._check_threepid_can_be_invited_callbacks) == 0:
return True
state_events = await self._get_state_map_for_room(room_id)
return await self.third_party_rules.check_threepid_can_be_invited(
medium, address, state_events
)
for callback in self._check_threepid_can_be_invited_callbacks:
try:
if await callback(medium, address, state_events) is False:
return False
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
return True
async def check_visibility_can_be_modified(
self, room_id: str, new_visibility: str
@ -137,18 +296,20 @@ class ThirdPartyEventRules:
Returns:
True if the room's visibility can be modified, False if not.
"""
if self.third_party_rules is None:
return True
check_func = getattr(
self.third_party_rules, "check_visibility_can_be_modified", None
)
if not check_func or not callable(check_func):
# Bail out early without hitting the store if we don't have any callback
if len(self._check_visibility_can_be_modified_callbacks) == 0:
return True
state_events = await self._get_state_map_for_room(room_id)
return await check_func(room_id, state_events, new_visibility)
for callback in self._check_visibility_can_be_modified_callbacks:
try:
if await callback(room_id, state_events, new_visibility) is False:
return False
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
return True
async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]:
"""Given a room ID, return the state events of that room.

View file

@ -86,7 +86,7 @@ class FederationClient(FederationBase):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.pdu_destination_tried = {} # type: Dict[str, Dict[str, int]]
self.pdu_destination_tried: Dict[str, Dict[str, int]] = {}
self._clock.looping_call(self._clear_tried_cache, 60 * 1000)
self.state = hs.get_state_handler()
self.transport_layer = hs.get_federation_transport_client()
@ -94,13 +94,13 @@ class FederationClient(FederationBase):
self.hostname = hs.hostname
self.signing_key = hs.signing_key
self._get_pdu_cache = ExpiringCache(
self._get_pdu_cache: ExpiringCache[str, EventBase] = ExpiringCache(
cache_name="get_pdu_cache",
clock=self._clock,
max_len=1000,
expiry_ms=120 * 1000,
reset_expiry_on_get=False,
) # type: ExpiringCache[str, EventBase]
)
def _clear_tried_cache(self):
"""Clear pdu_destination_tried cache"""
@ -293,10 +293,10 @@ class FederationClient(FederationBase):
transaction_data,
)
pdu_list = [
pdu_list: List[EventBase] = [
event_from_pdu_json(p, room_version, outlier=outlier)
for p in transaction_data["pdus"]
] # type: List[EventBase]
]
if pdu_list and pdu_list[0]:
pdu = pdu_list[0]

View file

@ -122,12 +122,12 @@ class FederationServer(FederationBase):
# origins that we are currently processing a transaction from.
# a dict from origin to txn id.
self._active_transactions = {} # type: Dict[str, str]
self._active_transactions: Dict[str, str] = {}
# We cache results for transaction with the same ID
self._transaction_resp_cache = ResponseCache(
self._transaction_resp_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
hs.get_clock(), "fed_txn_handler", timeout_ms=30000
) # type: ResponseCache[Tuple[str, str]]
)
self.transaction_actions = TransactionActions(self.store)
@ -135,12 +135,12 @@ class FederationServer(FederationBase):
# We cache responses to state queries, as they take a while and often
# come in waves.
self._state_resp_cache = ResponseCache(
hs.get_clock(), "state_resp", timeout_ms=30000
) # type: ResponseCache[Tuple[str, Optional[str]]]
self._state_ids_resp_cache = ResponseCache(
self._state_resp_cache: ResponseCache[
Tuple[str, Optional[str]]
] = ResponseCache(hs.get_clock(), "state_resp", timeout_ms=30000)
self._state_ids_resp_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
hs.get_clock(), "state_ids_resp", timeout_ms=30000
) # type: ResponseCache[Tuple[str, str]]
)
self._federation_metrics_domains = (
hs.config.federation.federation_metrics_domains
@ -337,7 +337,7 @@ class FederationServer(FederationBase):
origin_host, _ = parse_server_name(origin)
pdus_by_room = {} # type: Dict[str, List[EventBase]]
pdus_by_room: Dict[str, List[EventBase]] = {}
newest_pdu_ts = 0
@ -516,9 +516,9 @@ class FederationServer(FederationBase):
self, room_id: str, event_id: Optional[str]
) -> Dict[str, list]:
if event_id:
pdus = await self.handler.get_state_for_pdu(
pdus: Iterable[EventBase] = await self.handler.get_state_for_pdu(
room_id, event_id
) # type: Iterable[EventBase]
)
else:
pdus = (await self.state.get_current_state(room_id)).values()
@ -562,8 +562,7 @@ class FederationServer(FederationBase):
raise IncompatibleRoomVersionError(room_version=room_version)
pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
time_now = self._clock.time_msec()
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
return {"event": pdu.get_templated_pdu_json(), "room_version": room_version}
async def on_invite_request(
self, origin: str, content: JsonDict, room_version_id: str
@ -611,8 +610,7 @@ class FederationServer(FederationBase):
room_version = await self.store.get_room_version_id(room_id)
time_now = self._clock.time_msec()
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
return {"event": pdu.get_templated_pdu_json(), "room_version": room_version}
async def on_send_leave_request(
self, origin: str, content: JsonDict, room_id: str
@ -659,9 +657,8 @@ class FederationServer(FederationBase):
)
pdu = await self.handler.on_make_knock_request(origin, room_id, user_id)
time_now = self._clock.time_msec()
return {
"event": pdu.get_pdu_json(time_now),
"event": pdu.get_templated_pdu_json(),
"room_version": room_version.identifier,
}
@ -791,7 +788,7 @@ class FederationServer(FederationBase):
log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
results = await self.store.claim_e2e_one_time_keys(query)
json_result = {} # type: Dict[str, Dict[str, dict]]
json_result: Dict[str, Dict[str, dict]] = {}
for user_id, device_keys in results.items():
for device_id, keys in device_keys.items():
for key_id, json_str in keys.items():
@ -1119,17 +1116,13 @@ class FederationHandlerRegistry:
self._get_query_client = ReplicationGetQueryRestServlet.make_client(hs)
self._send_edu = ReplicationFederationSendEduRestServlet.make_client(hs)
self.edu_handlers = (
{}
) # type: Dict[str, Callable[[str, dict], Awaitable[None]]]
self.query_handlers = (
{}
) # type: Dict[str, Callable[[dict], Awaitable[JsonDict]]]
self.edu_handlers: Dict[str, Callable[[str, dict], Awaitable[None]]] = {}
self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {}
# Map from type to instance names that we should route EDU handling to.
# We randomly choose one instance from the list to route to for each new
# EDU received.
self._edu_type_to_instance = {} # type: Dict[str, List[str]]
self._edu_type_to_instance: Dict[str, List[str]] = {}
def register_edu_handler(
self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]

View file

@ -71,34 +71,32 @@ class FederationRemoteSendQueue(AbstractFederationSender):
# We may have multiple federation sender instances, so we need to track
# their positions separately.
self._sender_instances = hs.config.worker.federation_shard_config.instances
self._sender_positions = {} # type: Dict[str, int]
self._sender_positions: Dict[str, int] = {}
# Pending presence map user_id -> UserPresenceState
self.presence_map = {} # type: Dict[str, UserPresenceState]
self.presence_map: Dict[str, UserPresenceState] = {}
# Stores the destinations we need to explicitly send presence to about a
# given user.
# Stream position -> (user_id, destinations)
self.presence_destinations = (
SortedDict()
) # type: SortedDict[int, Tuple[str, Iterable[str]]]
self.presence_destinations: SortedDict[
int, Tuple[str, Iterable[str]]
] = SortedDict()
# (destination, key) -> EDU
self.keyed_edu = {} # type: Dict[Tuple[str, tuple], Edu]
self.keyed_edu: Dict[Tuple[str, tuple], Edu] = {}
# stream position -> (destination, key)
self.keyed_edu_changed = (
SortedDict()
) # type: SortedDict[int, Tuple[str, tuple]]
self.keyed_edu_changed: SortedDict[int, Tuple[str, tuple]] = SortedDict()
self.edus = SortedDict() # type: SortedDict[int, Edu]
self.edus: SortedDict[int, Edu] = SortedDict()
# stream ID for the next entry into keyed_edu_changed/edus.
self.pos = 1
# map from stream ID to the time that stream entry was generated, so that we
# can clear out entries after a while
self.pos_time = SortedDict() # type: SortedDict[int, int]
self.pos_time: SortedDict[int, int] = SortedDict()
# EVERYTHING IS SAD. In particular, python only makes new scopes when
# we make a new function, so we need to make a new function so the inner
@ -291,7 +289,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
# list of tuple(int, BaseFederationRow), where the first is the position
# of the federation stream.
rows = [] # type: List[Tuple[int, BaseFederationRow]]
rows: List[Tuple[int, BaseFederationRow]] = []
# Fetch presence to send to destinations
i = self.presence_destinations.bisect_right(from_token)
@ -445,11 +443,11 @@ class EduRow(BaseFederationRow, namedtuple("EduRow", ("edu",))): # Edu
buff.edus.setdefault(self.edu.destination, []).append(self.edu)
_rowtypes = (
_rowtypes: Tuple[Type[BaseFederationRow], ...] = (
PresenceDestinationsRow,
KeyedEduRow,
EduRow,
) # type: Tuple[Type[BaseFederationRow], ...]
)
TypeToRow = {Row.TypeId: Row for Row in _rowtypes}

View file

@ -14,9 +14,12 @@
import abc
import logging
from collections import OrderedDict
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Set, Tuple
import attr
from prometheus_client import Counter
from typing_extensions import Literal
from twisted.internet import defer
@ -33,8 +36,12 @@ from synapse.metrics import (
event_processing_loop_room_count,
events_processed_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
from synapse.util import Clock
from synapse.util.metrics import Measure
if TYPE_CHECKING:
@ -137,6 +144,84 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):
raise NotImplementedError()
@attr.s
class _PresenceQueue:
"""A queue of destinations that need to be woken up due to new presence
updates.
Staggers waking up of per destination queues to ensure that we don't attempt
to start TLS connections with many hosts all at once, leading to pinned CPU.
"""
# The maximum duration in seconds between queuing up a destination and it
# being woken up.
_MAX_TIME_IN_QUEUE = 30.0
# The maximum duration in seconds between waking up consecutive destination
# queues.
_MAX_DELAY = 0.1
sender: "FederationSender" = attr.ib()
clock: Clock = attr.ib()
queue: "OrderedDict[str, Literal[None]]" = attr.ib(factory=OrderedDict)
processing: bool = attr.ib(default=False)
def add_to_queue(self, destination: str) -> None:
"""Add a destination to the queue to be woken up."""
self.queue[destination] = None
if not self.processing:
self._handle()
@wrap_as_background_process("_PresenceQueue.handle")
async def _handle(self) -> None:
"""Background process to drain the queue."""
if not self.queue:
return
assert not self.processing
self.processing = True
try:
# We start with a delay that should drain the queue quickly enough that
# we process all destinations in the queue in _MAX_TIME_IN_QUEUE
# seconds.
#
# We also add an upper bound to the delay, to gracefully handle the
# case where the queue only has a few entries in it.
current_sleep_seconds = min(
self._MAX_DELAY, self._MAX_TIME_IN_QUEUE / len(self.queue)
)
while self.queue:
destination, _ = self.queue.popitem(last=False)
queue = self.sender._get_per_destination_queue(destination)
if not queue._new_data_to_send:
# The per destination queue has already been woken up.
continue
queue.attempt_new_transaction()
await self.clock.sleep(current_sleep_seconds)
if not self.queue:
break
# More destinations may have been added to the queue, so we may
# need to reduce the delay to ensure everything gets processed
# within _MAX_TIME_IN_QUEUE seconds.
current_sleep_seconds = min(
current_sleep_seconds, self._MAX_TIME_IN_QUEUE / len(self.queue)
)
finally:
self.processing = False
class FederationSender(AbstractFederationSender):
def __init__(self, hs: "HomeServer"):
self.hs = hs
@ -148,14 +233,14 @@ class FederationSender(AbstractFederationSender):
self.clock = hs.get_clock()
self.is_mine_id = hs.is_mine_id
self._presence_router = None # type: Optional[PresenceRouter]
self._presence_router: Optional["PresenceRouter"] = None
self._transaction_manager = TransactionManager(hs)
self._instance_name = hs.get_instance_name()
self._federation_shard_config = hs.config.worker.federation_shard_config
# map from destination to PerDestinationQueue
self._per_destination_queues = {} # type: Dict[str, PerDestinationQueue]
self._per_destination_queues: Dict[str, PerDestinationQueue] = {}
LaterGauge(
"synapse_federation_transaction_queue_pending_destinations",
@ -192,9 +277,7 @@ class FederationSender(AbstractFederationSender):
# awaiting a call to flush_read_receipts_for_room. The presence of an entry
# here for a given room means that we are rate-limiting RR flushes to that room,
# and that there is a pending call to _flush_rrs_for_room in the system.
self._queues_awaiting_rr_flush_by_room = (
{}
) # type: Dict[str, Set[PerDestinationQueue]]
self._queues_awaiting_rr_flush_by_room: Dict[str, Set[PerDestinationQueue]] = {}
self._rr_txn_interval_per_room_ms = (
1000.0 / hs.config.federation_rr_transactions_per_room_per_second
@ -210,6 +293,8 @@ class FederationSender(AbstractFederationSender):
self._external_cache = hs.get_external_cache()
self._presence_queue = _PresenceQueue(self, self.clock)
def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
"""Get or create a PerDestinationQueue for the given destination
@ -265,7 +350,7 @@ class FederationSender(AbstractFederationSender):
if not event.internal_metadata.should_proactively_send():
return
destinations = None # type: Optional[Set[str]]
destinations: Optional[Set[str]] = None
if not event.prev_event_ids():
# If there are no prev event IDs then the state is empty
# and so no remote servers in the room
@ -331,7 +416,7 @@ class FederationSender(AbstractFederationSender):
for event in events:
await handle_event(event)
events_by_room = {} # type: Dict[str, List[EventBase]]
events_by_room: Dict[str, List[EventBase]] = {}
for event in events:
events_by_room.setdefault(event.room_id, []).append(event)
@ -519,7 +604,12 @@ class FederationSender(AbstractFederationSender):
self._instance_name, destination
):
continue
self._get_per_destination_queue(destination).send_presence(states)
self._get_per_destination_queue(destination).send_presence(
states, start_loop=False
)
self._presence_queue.add_to_queue(destination)
def build_and_send_edu(
self,
@ -628,7 +718,7 @@ class FederationSender(AbstractFederationSender):
In order to reduce load spikes, adds a delay between each destination.
"""
last_processed = None # type: Optional[str]
last_processed: Optional[str] = None
while True:
destinations_to_wake = (

View file

@ -105,34 +105,34 @@ class PerDestinationQueue:
# catch-up at startup.
# New events will only be sent once this is finished, at which point
# _catching_up is flipped to False.
self._catching_up = True # type: bool
self._catching_up: bool = True
# The stream_ordering of the most recent PDU that was discarded due to
# being in catch-up mode.
self._catchup_last_skipped = 0 # type: int
self._catchup_last_skipped: int = 0
# Cache of the last successfully-transmitted stream ordering for this
# destination (we are the only updater so this is safe)
self._last_successful_stream_ordering = None # type: Optional[int]
self._last_successful_stream_ordering: Optional[int] = None
# a queue of pending PDUs
self._pending_pdus = [] # type: List[EventBase]
self._pending_pdus: List[EventBase] = []
# XXX this is never actually used: see
# https://github.com/matrix-org/synapse/issues/7549
self._pending_edus = [] # type: List[Edu]
self._pending_edus: List[Edu] = []
# Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
# based on their key (e.g. typing events by room_id)
# Map of (edu_type, key) -> Edu
self._pending_edus_keyed = {} # type: Dict[Tuple[str, Hashable], Edu]
self._pending_edus_keyed: Dict[Tuple[str, Hashable], Edu] = {}
# Map of user_id -> UserPresenceState of pending presence to be sent to this
# destination
self._pending_presence = {} # type: Dict[str, UserPresenceState]
self._pending_presence: Dict[str, UserPresenceState] = {}
# room_id -> receipt_type -> user_id -> receipt_dict
self._pending_rrs = {} # type: Dict[str, Dict[str, Dict[str, dict]]]
self._pending_rrs: Dict[str, Dict[str, Dict[str, dict]]] = {}
self._rrs_pending_flush = False
# stream_id of last successfully sent to-device message.
@ -171,14 +171,24 @@ class PerDestinationQueue:
self.attempt_new_transaction()
def send_presence(self, states: Iterable[UserPresenceState]) -> None:
"""Add presence updates to the queue. Start the transmission loop if necessary.
def send_presence(
self, states: Iterable[UserPresenceState], start_loop: bool = True
) -> None:
"""Add presence updates to the queue.
Args:
states: Presence updates to send
start_loop: Whether to start the transmission loop if not already
running.
Args:
states: presence to send
"""
self._pending_presence.update({state.user_id: state for state in states})
self.attempt_new_transaction()
self._new_data_to_send = True
if start_loop:
self.attempt_new_transaction()
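The new start_loop flag lets callers buffer presence updates without immediately kicking off a transmission loop, so the federation sender can enqueue updates for many destinations and let its _PresenceQueue wake them in a rate-limited way. A toy model of that contract (illustrative only, not Synapse code):

from typing import Dict, Iterable

class ToyDestinationQueue:
    """Toy stand-in for PerDestinationQueue's new send_presence contract."""

    def __init__(self) -> None:
        self._pending_presence: Dict[str, str] = {}
        self._new_data_to_send = False
        self.loops_started = 0

    def attempt_new_transaction(self) -> None:
        self.loops_started += 1  # stands in for starting the HTTP send loop

    def send_presence(self, states: Iterable[str], start_loop: bool = True) -> None:
        # Buffer the updates; only wake the send loop when asked to.
        self._pending_presence.update({s: s for s in states})
        self._new_data_to_send = True
        if start_loop:
            self.attempt_new_transaction()

queue = ToyDestinationQueue()
queue.send_presence(["@alice:example.org"], start_loop=False)
assert queue.loops_started == 0  # deferred: the presence queue wakes it later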
def queue_read_receipt(self, receipt: ReadReceipt) -> None:
"""Add a RR to the list to be sent. Doesn't start the transmission loop yet
@ -243,7 +253,7 @@ class PerDestinationQueue:
)
async def _transaction_transmission_loop(self) -> None:
pending_pdus = [] # type: List[EventBase]
pending_pdus: List[EventBase] = []
try:
self.transmission_loop_running = True

View file

@ -395,9 +395,9 @@ class TransportLayerClient:
# this uses MSC2197 (Search Filtering over Federation)
path = _create_v1_path("/publicRooms")
data = {
data: Dict[str, Any] = {
"include_all_networks": "true" if include_all_networks else "false"
} # type: Dict[str, Any]
}
if third_party_instance_id:
data["third_party_instance_id"] = third_party_instance_id
if limit:
@ -423,9 +423,9 @@ class TransportLayerClient:
else:
path = _create_v1_path("/publicRooms")
args = {
args: Dict[str, Any] = {
"include_all_networks": "true" if include_all_networks else "false"
} # type: Dict[str, Any]
}
if third_party_instance_id:
args["third_party_instance_id"] = (third_party_instance_id,)
if limit:

View file

@ -1013,7 +1013,7 @@ class PublicRoomList(BaseFederationServlet):
if not self.allow_access:
raise FederationDeniedError(origin)
limit = int(content.get("limit", 100)) # type: Optional[int]
limit: Optional[int] = int(content.get("limit", 100))
since_token = content.get("since", None)
search_filter = content.get("filter", None)
@ -1095,7 +1095,9 @@ class FederationGroupsProfileServlet(BaseGroupsServerServlet):
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
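These required=True changes tighten the helper's contract: a missing requester_user_id now raises instead of returning None, so the get_domain_from_id call type-checks against str. A simplified stand-in for the helper (not the real synapse.http.servlet implementation, which raises a 400 SynapseError and does more validation):

from typing import Dict, List, Optional

def parse_string_from_args(
    args: Dict[bytes, List[bytes]], name: str, required: bool = False
) -> Optional[str]:
    values = args.get(name.encode("ascii"))
    if not values:
        if required:
            # The real helper raises SynapseError(400, ...) here.
            raise KeyError(f"Missing string query parameter {name!r}")
        return None
    return values[0].decode("utf-8")

assert parse_string_from_args({b"requester_user_id": [b"@a:hs"]}, "requester_user_id") == "@a:hs"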
@ -1110,7 +1112,9 @@ class FederationGroupsProfileServlet(BaseGroupsServerServlet):
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1131,7 +1135,9 @@ class FederationGroupsSummaryServlet(BaseGroupsServerServlet):
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1152,7 +1158,9 @@ class FederationGroupsRoomsServlet(BaseGroupsServerServlet):
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1174,7 +1182,9 @@ class FederationGroupsAddRoomsServlet(BaseGroupsServerServlet):
group_id: str,
room_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1192,7 +1202,9 @@ class FederationGroupsAddRoomsServlet(BaseGroupsServerServlet):
group_id: str,
room_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1220,7 +1232,9 @@ class FederationGroupsAddRoomsConfigServlet(BaseGroupsServerServlet):
room_id: str,
config_key: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1243,7 +1257,9 @@ class FederationGroupsUsersServlet(BaseGroupsServerServlet):
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1264,7 +1280,9 @@ class FederationGroupsInvitedUsersServlet(BaseGroupsServerServlet):
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1288,7 +1306,9 @@ class FederationGroupsInviteServlet(BaseGroupsServerServlet):
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1354,7 +1374,9 @@ class FederationGroupsRemoveUserServlet(BaseGroupsServerServlet):
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1487,7 +1509,9 @@ class FederationGroupsSummaryRoomsServlet(BaseGroupsServerServlet):
category_id: str,
room_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1523,7 +1547,9 @@ class FederationGroupsSummaryRoomsServlet(BaseGroupsServerServlet):
category_id: str,
room_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1549,7 +1575,9 @@ class FederationGroupsCategoriesServlet(BaseGroupsServerServlet):
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1571,7 +1599,9 @@ class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
group_id: str,
category_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1589,7 +1619,9 @@ class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
group_id: str,
category_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1618,7 +1650,9 @@ class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
group_id: str,
category_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1644,7 +1678,9 @@ class FederationGroupsRolesServlet(BaseGroupsServerServlet):
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1666,7 +1702,9 @@ class FederationGroupsRoleServlet(BaseGroupsServerServlet):
group_id: str,
role_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1682,7 +1720,9 @@ class FederationGroupsRoleServlet(BaseGroupsServerServlet):
group_id: str,
role_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1713,7 +1753,9 @@ class FederationGroupsRoleServlet(BaseGroupsServerServlet):
group_id: str,
role_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1750,7 +1792,9 @@ class FederationGroupsSummaryUsersServlet(BaseGroupsServerServlet):
role_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1784,7 +1828,9 @@ class FederationGroupsSummaryUsersServlet(BaseGroupsServerServlet):
role_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1825,7 +1871,9 @@ class FederationGroupsSettingJoinPolicyServlet(BaseGroupsServerServlet):
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@ -1943,7 +1991,7 @@ class RoomComplexityServlet(BaseFederationServlet):
return 200, complexity
FEDERATION_SERVLET_CLASSES = (
FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationSendServlet,
FederationEventServlet,
FederationStateV1Servlet,
@ -1971,15 +2019,13 @@ FEDERATION_SERVLET_CLASSES = (
FederationSpaceSummaryServlet,
FederationV1SendKnockServlet,
FederationMakeKnockServlet,
) # type: Tuple[Type[BaseFederationServlet], ...]
)
OPENID_SERVLET_CLASSES = (
OpenIdUserInfo,
) # type: Tuple[Type[BaseFederationServlet], ...]
OPENID_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (OpenIdUserInfo,)
ROOM_LIST_CLASSES = (PublicRoomList,) # type: Tuple[Type[PublicRoomList], ...]
ROOM_LIST_CLASSES: Tuple[Type[PublicRoomList], ...] = (PublicRoomList,)
GROUP_SERVER_SERVLET_CLASSES = (
GROUP_SERVER_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationGroupsProfileServlet,
FederationGroupsSummaryServlet,
FederationGroupsRoomsServlet,
@ -1998,19 +2044,19 @@ GROUP_SERVER_SERVLET_CLASSES = (
FederationGroupsAddRoomsServlet,
FederationGroupsAddRoomsConfigServlet,
FederationGroupsSettingJoinPolicyServlet,
) # type: Tuple[Type[BaseFederationServlet], ...]
)
GROUP_LOCAL_SERVLET_CLASSES = (
GROUP_LOCAL_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationGroupsLocalInviteServlet,
FederationGroupsRemoveLocalUserServlet,
FederationGroupsBulkPublicisedServlet,
) # type: Tuple[Type[BaseFederationServlet], ...]
)
GROUP_ATTESTATION_SERVLET_CLASSES = (
GROUP_ATTESTATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationGroupsRenewAttestaionServlet,
) # type: Tuple[Type[BaseFederationServlet], ...]
)
DEFAULT_SERVLET_GROUPS = (

View file

@ -707,9 +707,9 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
See accept_invite, join_group.
"""
if not self.hs.is_mine_id(user_id):
local_attestation = self.attestations.create_attestation(
group_id, user_id
) # type: Optional[JsonDict]
local_attestation: Optional[
JsonDict
] = self.attestations.create_attestation(group_id, user_id)
remote_attestation = content["attestation"]
@ -868,9 +868,9 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
remote_attestation, user_id=requester_user_id, group_id=group_id
)
local_attestation = self.attestations.create_attestation(
group_id, requester_user_id
) # type: Optional[JsonDict]
local_attestation: Optional[
JsonDict
] = self.attestations.create_attestation(group_id, requester_user_id)
else:
local_attestation = None
remote_attestation = None

View file

@ -38,10 +38,10 @@ class BaseHandler:
"""
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore() # type: synapse.storage.DataStore
self.store = hs.get_datastore()
self.auth = hs.get_auth()
self.notifier = hs.get_notifier()
self.state_handler = hs.get_state_handler() # type: synapse.state.StateHandler
self.state_handler = hs.get_state_handler()
self.distributor = hs.get_distributor()
self.clock = hs.get_clock()
self.hs = hs
@ -55,12 +55,12 @@ class BaseHandler:
# Check whether ratelimiting room admin message redaction is enabled
# by the presence of rate limits in the config
if self.hs.config.rc_admin_redaction:
self.admin_redaction_ratelimiter = Ratelimiter(
self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=self.hs.config.rc_admin_redaction.per_second,
burst_count=self.hs.config.rc_admin_redaction.burst_count,
) # type: Optional[Ratelimiter]
)
else:
self.admin_redaction_ratelimiter = None

View file

@ -15,9 +15,11 @@
import email.mime.multipart
import email.utils
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple
from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple
from synapse.api.errors import StoreError, SynapseError
from twisted.web.http import Request
from synapse.api.errors import AuthError, StoreError, SynapseError
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import UserID
from synapse.util import stringutils
@ -27,6 +29,15 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
# Types for callbacks to be registered via the module api
IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]]
ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable]
# Temporary hooks to allow for a transition from `/_matrix/client` endpoints
# to `/_synapse/client/account_validity`. See `register_account_validity_callbacks`.
ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable]
ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]]
ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable]
class AccountValidityHandler:
def __init__(self, hs: "HomeServer"):
@ -70,6 +81,99 @@ class AccountValidityHandler:
if hs.config.run_background_tasks:
self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)
self._is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = []
self._on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = []
self._on_legacy_send_mail_callback: Optional[
ON_LEGACY_SEND_MAIL_CALLBACK
] = None
self._on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None
# The legacy admin requests callback isn't a protected attribute because we need
# to access it from the admin servlet, which is outside of this handler.
self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None
def register_account_validity_callbacks(
self,
is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None,
on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None,
on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None,
on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None,
on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None,
):
"""Register callbacks from module for each hook."""
if is_user_expired is not None:
self._is_user_expired_callbacks.append(is_user_expired)
if on_user_registration is not None:
self._on_user_registration_callbacks.append(on_user_registration)
# The builtin account validity feature exposes 3 endpoints (send_mail, renew, and
# an admin one). As part of moving the feature into a module, we need to change
# the path from /_matrix/client/unstable/account_validity/... to
# /_synapse/client/account_validity, because:
#
# * the feature isn't part of the Matrix spec and thus shouldn't live under /_matrix
# * the way we register servlets means that modules can't register resources
# under /_matrix/client
#
# We need to allow for a transition period between the old and new endpoints
# in order to allow clients to update (and for emails to be processed).
#
# Once the email-account-validity module is loaded, it will take control of account
# validity by moving the rows from our `account_validity` table into its own table.
#
# Therefore, we need to allow modules (in practice just the one implementing the
# email-based account validity) to temporarily hook into the legacy endpoints so we
# can route the traffic coming into the old endpoints into the module, which is
# why we have the following three temporary hooks.
if on_legacy_send_mail is not None:
if self._on_legacy_send_mail_callback is not None:
raise RuntimeError("Tried to register on_legacy_send_mail twice")
self._on_legacy_send_mail_callback = on_legacy_send_mail
if on_legacy_renew is not None:
if self._on_legacy_renew_callback is not None:
raise RuntimeError("Tried to register on_legacy_renew twice")
self._on_legacy_renew_callback = on_legacy_renew
if on_legacy_admin_request is not None:
if self.on_legacy_admin_request_callback is not None:
raise RuntimeError("Tried to register on_legacy_admin_request twice")
self.on_legacy_admin_request_callback = on_legacy_admin_request
async def is_user_expired(self, user_id: str) -> bool:
"""Checks if a user has expired against third-party modules.
Args:
user_id: The user to check the expiry of.
Returns:
Whether the user has expired.
"""
for callback in self._is_user_expired_callbacks:
expired = await callback(user_id)
if expired is not None:
return expired
if self._account_validity_enabled:
# If no module could determine whether the user has expired and the legacy
# configuration is enabled, fall back to it.
return await self.store.is_account_expired(user_id, self.clock.time_msec())
return False
async def on_user_registration(self, user_id: str):
"""Tell third-party modules about a user's registration.
Args:
user_id: The ID of the newly registered user.
"""
for callback in self._on_user_registration_callbacks:
await callback(user_id)
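Together these hooks let a module take over account validity. A hypothetical module registering the callbacks might look like the sketch below (the register_account_validity_callbacks module-API method is assumed to forward to the handler method above; all names here are illustrative):

from typing import Optional

class ExampleAccountValidityModule:
    """Hypothetical module wiring into the account validity hooks."""

    def __init__(self, config: dict, api):
        self._expired_users = set(config.get("expired_users", []))
        api.register_account_validity_callbacks(
            is_user_expired=self.is_user_expired,
            on_user_registration=self.on_user_registration,
        )

    async def is_user_expired(self, user_id: str) -> Optional[bool]:
        # Returning None lets other modules (or the legacy config) decide.
        if user_id in self._expired_users:
            return True
        return None

    async def on_user_registration(self, user_id: str) -> None:
        # E.g. start this user's validity clock.
        self._expired_users.discard(user_id)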
@wrap_as_background_process("send_renewals")
async def _send_renewal_emails(self) -> None:
"""Gets the list of users whose account is expiring in the amount of time
@ -95,6 +199,17 @@ class AccountValidityHandler:
Raises:
SynapseError if the user is not set to renew.
"""
# If a module supports sending a renewal email from here, do that, otherwise do
# the legacy dance.
if self._on_legacy_send_mail_callback is not None:
await self._on_legacy_send_mail_callback(user_id)
return
if not self._account_validity_renew_by_email_enabled:
raise AuthError(
403, "Account renewal via email is disabled on this server."
)
expiration_ts = await self.store.get_expiration_ts_for_user(user_id)
# If this user isn't set to be expired, raise an error.
@ -209,6 +324,10 @@ class AccountValidityHandler:
token is considered stale. A token is stale if the 'token_used_ts_ms' db column
is non-null.
This method exists to support handling the legacy account validity /renew
endpoint. If a module implements the on_legacy_renew callback, then this process
is delegated to the module instead.
Args:
renewal_token: Token sent with the renewal request.
Returns:
@ -218,6 +337,11 @@ class AccountValidityHandler:
* An int representing the user's expiry timestamp as milliseconds since the
epoch, or 0 if the token was invalid.
"""
# If a module supports triggering a renew from here, do that, otherwise do the
# legacy dance.
if self._on_legacy_renew_callback is not None:
return await self._on_legacy_renew_callback(renewal_token)
try:
(
user_id,

View file

@ -139,7 +139,7 @@ class AdminHandler(BaseHandler):
to_key = RoomStreamToken(None, stream_ordering)
# Events that we've processed in this room
written_events = set() # type: Set[str]
written_events: Set[str] = set()
# We need to track gaps in the events stream so that we can then
# write out the state at those events. We do this by keeping track
@ -152,7 +152,7 @@ class AdminHandler(BaseHandler):
# The reverse mapping to above, i.e. map from unseen event to events
# that have the unseen event in their prev_events, i.e. the unseen
# events "children".
unseen_to_child_events = {} # type: Dict[str, Set[str]]
unseen_to_child_events: Dict[str, Set[str]] = {}
# We fetch events in the room the user could see by fetching *all*
# events that we have and then filtering, this isn't the most

View file

@ -96,7 +96,7 @@ class ApplicationServicesHandler:
self.current_max, limit
)
events_by_room = {} # type: Dict[str, List[EventBase]]
events_by_room: Dict[str, List[EventBase]] = {}
for event in events:
events_by_room.setdefault(event.room_id, []).append(event)
@ -275,7 +275,7 @@ class ApplicationServicesHandler:
async def _handle_presence(
self, service: ApplicationService, users: Collection[Union[str, UserID]]
) -> List[JsonDict]:
events = [] # type: List[JsonDict]
events: List[JsonDict] = []
presence_source = self.event_sources.sources["presence"]
from_key = await self.store.get_type_stream_id_for_appservice(
service, "presence"
@ -375,7 +375,7 @@ class ApplicationServicesHandler:
self, only_protocol: Optional[str] = None
) -> Dict[str, JsonDict]:
services = self.store.get_app_services()
protocols = {} # type: Dict[str, List[JsonDict]]
protocols: Dict[str, List[JsonDict]] = {}
# Collect up all the individual protocol responses out of the ASes
for s in services:

View file

@ -191,7 +191,7 @@ class AuthHandler(BaseHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.checkers = {} # type: Dict[str, UserInteractiveAuthChecker]
self.checkers: Dict[str, UserInteractiveAuthChecker] = {}
for auth_checker_class in INTERACTIVE_AUTH_CHECKERS:
inst = auth_checker_class(hs)
if inst.is_enabled():
@ -296,7 +296,7 @@ class AuthHandler(BaseHandler):
# A mapping of user ID to extra attributes to include in the login
# response.
self._extra_attributes = {} # type: Dict[str, SsoLoginExtraAttributes]
self._extra_attributes: Dict[str, SsoLoginExtraAttributes] = {}
async def validate_user_via_ui_auth(
self,
@ -500,7 +500,7 @@ class AuthHandler(BaseHandler):
all the stages in any of the permitted flows.
"""
sid = None # type: Optional[str]
sid: Optional[str] = None
authdict = clientdict.pop("auth", {})
if "session" in authdict:
sid = authdict["session"]
@ -588,9 +588,9 @@ class AuthHandler(BaseHandler):
)
# check auth type currently being presented
errordict = {} # type: Dict[str, Any]
errordict: Dict[str, Any] = {}
if "type" in authdict:
login_type = authdict["type"] # type: str
login_type: str = authdict["type"]
try:
result = await self._check_auth_dict(authdict, clientip)
if result:
@ -766,7 +766,7 @@ class AuthHandler(BaseHandler):
LoginType.TERMS: self._get_params_terms,
}
params = {} # type: Dict[str, Any]
params: Dict[str, Any] = {}
for f in public_flows:
for stage in f:
@ -1530,9 +1530,9 @@ class AuthHandler(BaseHandler):
except StoreError:
raise SynapseError(400, "Unknown session ID: %s" % (session_id,))
user_id_to_verify = await self.get_session_data(
user_id_to_verify: str = await self.get_session_data(
session_id, UIAuthSessionDataConstants.REQUEST_USER_ID
) # type: str
)
idps = await self.hs.get_sso_handler().get_identity_providers_for_user(
user_id_to_verify

View file

@ -40,7 +40,7 @@ class CasError(Exception):
def __str__(self):
if self.error_description:
return "{}: {}".format(self.error, self.error_description)
return f"{self.error}: {self.error_description}"
return self.error
@ -171,7 +171,7 @@ class CasHandler:
# Iterate through the nodes and pull out the user and any extra attributes.
user = None
attributes = {} # type: Dict[str, List[Optional[str]]]
attributes: Dict[str, List[Optional[str]]] = {}
for child in root[0]:
if child.tag.endswith("user"):
user = child.text

View file

@ -452,7 +452,7 @@ class DeviceHandler(DeviceWorkerHandler):
user_id
)
hosts = set() # type: Set[str]
hosts: Set[str] = set()
if self.hs.is_mine_id(user_id):
hosts.update(get_domain_from_id(u) for u in users_who_share_room)
hosts.discard(self.server_name)
@ -613,20 +613,20 @@ class DeviceListUpdater:
self._remote_edu_linearizer = Linearizer(name="remote_device_list")
# user_id -> list of updates waiting to be handled.
self._pending_updates = (
{}
) # type: Dict[str, List[Tuple[str, str, Iterable[str], JsonDict]]]
self._pending_updates: Dict[
str, List[Tuple[str, str, Iterable[str], JsonDict]]
] = {}
# Recently seen stream ids. We don't bother keeping these in the DB,
# but they're useful to have them about to reduce the number of spurious
# resyncs.
self._seen_updates = ExpiringCache(
self._seen_updates: ExpiringCache[str, Set[str]] = ExpiringCache(
cache_name="device_update_edu",
clock=self.clock,
max_len=10000,
expiry_ms=30 * 60 * 1000,
iterable=True,
) # type: ExpiringCache[str, Set[str]]
)
# Attempt to resync out of sync device lists every 30s.
self._resync_retry_in_progress = False
@ -755,7 +755,7 @@ class DeviceListUpdater:
"""Given a list of updates for a user figure out if we need to do a full
resync, or whether we have enough data that we can just apply the delta.
"""
seen_updates = self._seen_updates.get(user_id, set()) # type: Set[str]
seen_updates: Set[str] = self._seen_updates.get(user_id, set())
extremity = await self.store.get_device_list_last_stream_id_for_remote(user_id)

View file

@ -203,7 +203,7 @@ class DeviceMessageHandler:
log_kv({"number_of_to_device_messages": len(messages)})
set_tag("sender", sender_user_id)
local_messages = {}
remote_messages = {} # type: Dict[str, Dict[str, Dict[str, JsonDict]]]
remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
for user_id, by_device in messages.items():
# Ratelimit local cross-user key requests by the sending device.
if (

View file

@ -22,6 +22,7 @@ from synapse.api.errors import (
CodeMessageException,
Codes,
NotFoundError,
RequestSendFailed,
ShadowBanError,
StoreError,
SynapseError,
@ -238,9 +239,9 @@ class DirectoryHandler(BaseHandler):
async def get_association(self, room_alias: RoomAlias) -> JsonDict:
room_id = None
if self.hs.is_mine(room_alias):
result = await self.get_association_from_room_alias(
room_alias
) # type: Optional[RoomAliasMapping]
result: Optional[
RoomAliasMapping
] = await self.get_association_from_room_alias(room_alias)
if result:
room_id = result.room_id
@ -254,12 +255,14 @@ class DirectoryHandler(BaseHandler):
retry_on_dns_fail=False,
ignore_backoff=True,
)
except RequestSendFailed:
raise SynapseError(502, "Failed to fetch alias")
except CodeMessageException as e:
logging.warning("Error retrieving alias")
if e.code == 404:
fed_result = None
else:
raise
raise SynapseError(502, "Failed to fetch alias")
if fed_result and "room_id" in fed_result and "servers" in fed_result:
room_id = fed_result["room_id"]

View file

@ -115,9 +115,9 @@ class E2eKeysHandler:
the number of in-flight queries at a time.
"""
with await self._query_devices_linearizer.queue((from_user_id, from_device_id)):
device_keys_query = query_body.get(
device_keys_query: Dict[str, Iterable[str]] = query_body.get(
"device_keys", {}
) # type: Dict[str, Iterable[str]]
)
# separate users by domain.
# make a map from domain to user_id to device_ids
@ -136,7 +136,7 @@ class E2eKeysHandler:
# First get local devices.
# A map of destination -> failure response.
failures = {} # type: Dict[str, JsonDict]
failures: Dict[str, JsonDict] = {}
results = {}
if local_query:
local_result = await self.query_local_devices(local_query)
@ -151,11 +151,9 @@ class E2eKeysHandler:
# Now attempt to get any remote devices from our local cache.
# A map of destination -> user ID -> device IDs.
remote_queries_not_in_cache = (
{}
) # type: Dict[str, Dict[str, Iterable[str]]]
remote_queries_not_in_cache: Dict[str, Dict[str, Iterable[str]]] = {}
if remote_queries:
query_list = [] # type: List[Tuple[str, Optional[str]]]
query_list: List[Tuple[str, Optional[str]]] = []
for user_id, device_ids in remote_queries.items():
if device_ids:
query_list.extend(
@ -362,9 +360,9 @@ class E2eKeysHandler:
A map from user_id -> device_id -> device details
"""
set_tag("local_query", query)
local_query = [] # type: List[Tuple[str, Optional[str]]]
local_query: List[Tuple[str, Optional[str]]] = []
result_dict = {} # type: Dict[str, Dict[str, dict]]
result_dict: Dict[str, Dict[str, dict]] = {}
for user_id, device_ids in query.items():
# we use UserID.from_string to catch invalid user ids
if not self.is_mine(UserID.from_string(user_id)):
@ -402,9 +400,9 @@ class E2eKeysHandler:
self, query_body: Dict[str, Dict[str, Optional[List[str]]]]
) -> JsonDict:
"""Handle a device key query from a federated server"""
device_keys_query = query_body.get(
device_keys_query: Dict[str, Optional[List[str]]] = query_body.get(
"device_keys", {}
) # type: Dict[str, Optional[List[str]]]
)
res = await self.query_local_devices(device_keys_query)
ret = {"device_keys": res}
@ -421,8 +419,8 @@ class E2eKeysHandler:
async def claim_one_time_keys(
self, query: Dict[str, Dict[str, Dict[str, str]]], timeout: int
) -> JsonDict:
local_query = [] # type: List[Tuple[str, str, str]]
remote_queries = {} # type: Dict[str, Dict[str, Dict[str, str]]]
local_query: List[Tuple[str, str, str]] = []
remote_queries: Dict[str, Dict[str, Dict[str, str]]] = {}
for user_id, one_time_keys in query.get("one_time_keys", {}).items():
# we use UserID.from_string to catch invalid user ids
@ -439,8 +437,8 @@ class E2eKeysHandler:
results = await self.store.claim_e2e_one_time_keys(local_query)
# A map of user ID -> device ID -> key ID -> key.
json_result = {} # type: Dict[str, Dict[str, Dict[str, JsonDict]]]
failures = {} # type: Dict[str, JsonDict]
json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
failures: Dict[str, JsonDict] = {}
for user_id, device_keys in results.items():
for device_id, keys in device_keys.items():
for key_id, json_str in keys.items():
@ -768,8 +766,8 @@ class E2eKeysHandler:
Raises:
SynapseError: if the input is malformed
"""
signature_list = [] # type: List[SignatureListItem]
failures = {} # type: Dict[str, Dict[str, JsonDict]]
signature_list: List["SignatureListItem"] = []
failures: Dict[str, Dict[str, JsonDict]] = {}
if not signatures:
return signature_list, failures
@ -930,8 +928,8 @@ class E2eKeysHandler:
Raises:
SynapseError: if the input is malformed
"""
signature_list = [] # type: List[SignatureListItem]
failures = {} # type: Dict[str, Dict[str, JsonDict]]
signature_list: List["SignatureListItem"] = []
failures: Dict[str, Dict[str, JsonDict]] = {}
if not signatures:
return signature_list, failures
@ -1300,7 +1298,7 @@ class SigningKeyEduUpdater:
self._remote_edu_linearizer = Linearizer(name="remote_signing_key")
# user_id -> list of updates waiting to be handled.
self._pending_updates = {} # type: Dict[str, List[Tuple[JsonDict, JsonDict]]]
self._pending_updates: Dict[str, List[Tuple[JsonDict, JsonDict]]] = {}
async def incoming_signing_key_update(
self, origin: str, edu_content: JsonDict
@ -1349,7 +1347,7 @@ class SigningKeyEduUpdater:
# This can happen since we batch updates
return
device_ids = [] # type: List[str]
device_ids: List[str] = []
logger.info("pending updates: %r", pending_updates)

View file

@ -93,7 +93,7 @@ class EventStreamHandler(BaseHandler):
# When the user joins a new room, or another user joins a currently
# joined room, we need to send down presence for those users.
to_add = [] # type: List[JsonDict]
to_add: List[JsonDict] = []
for event in events:
if not isinstance(event, EventBase):
continue
@ -103,9 +103,9 @@ class EventStreamHandler(BaseHandler):
# Send down presence.
if event.state_key == auth_user_id:
# Send down presence for everyone in the room.
users = await self.store.get_users_in_room(
users: Iterable[str] = await self.store.get_users_in_room(
event.room_id
) # type: Iterable[str]
)
else:
users = [event.state_key]

View file

@ -181,7 +181,7 @@ class FederationHandler(BaseHandler):
# When joining a room we need to queue any events for that room up.
# For each room, a list of (pdu, origin) tuples.
self.room_queues = {} # type: Dict[str, List[Tuple[EventBase, str]]]
self.room_queues: Dict[str, List[Tuple[EventBase, str]]] = {}
self._room_pdu_linearizer = Linearizer("fed_room_pdu")
self._room_backfill = Linearizer("room_backfill")
@ -368,7 +368,7 @@ class FederationHandler(BaseHandler):
ours = await self.state_store.get_state_groups_ids(room_id, seen)
# state_maps is a list of mappings from (type, state_key) to event_id
state_maps = list(ours.values()) # type: List[StateMap[str]]
state_maps: List[StateMap[str]] = list(ours.values())
# we don't need this any more, let's delete it.
del ours
@ -735,7 +735,7 @@ class FederationHandler(BaseHandler):
# we need to make sure we re-load from the database to get the rejected
# state correct.
fetched_events.update(
(await self.store.get_events(missing_desired_events, allow_rejected=True))
await self.store.get_events(missing_desired_events, allow_rejected=True)
)
# check for events which were in the wrong room.
@ -845,7 +845,7 @@ class FederationHandler(BaseHandler):
# exact key to expect. Otherwise check it matches any key we
# have for that device.
current_keys = [] # type: Container[str]
current_keys: Container[str] = []
if device:
keys = device.get("keys", {}).get("keys", {})
@ -1185,7 +1185,7 @@ class FederationHandler(BaseHandler):
if e_type == EventTypes.Member and event.membership == Membership.JOIN
]
joined_domains = {} # type: Dict[str, int]
joined_domains: Dict[str, int] = {}
for u, d in joined_users:
try:
dom = get_domain_from_id(u)
@ -1314,7 +1314,7 @@ class FederationHandler(BaseHandler):
room_version = await self.store.get_room_version(room_id)
event_map = {} # type: Dict[str, EventBase]
event_map: Dict[str, EventBase] = {}
async def get_event(event_id: str):
with nested_logging_context(event_id):
@ -1414,12 +1414,15 @@ class FederationHandler(BaseHandler):
Invites must be signed by the invitee's server before distribution.
"""
pdu = await self.federation_client.send_invite(
destination=target_host,
room_id=event.room_id,
event_id=event.event_id,
pdu=event,
)
try:
pdu = await self.federation_client.send_invite(
destination=target_host,
room_id=event.room_id,
event_id=event.event_id,
pdu=event,
)
except RequestSendFailed:
raise SynapseError(502, f"Can't connect to server {target_host}")
return pdu
@ -1593,7 +1596,7 @@ class FederationHandler(BaseHandler):
# Ask the remote server to create a valid knock event for us. Once received,
# we sign the event
params = {"ver": supported_room_versions} # type: Dict[str, Iterable[str]]
params: Dict[str, Iterable[str]] = {"ver": supported_room_versions}
origin, event, event_format_version = await self._make_and_verify_event(
target_hosts, room_id, knockee, Membership.KNOCK, content, params=params
)
@ -1931,7 +1934,7 @@ class FederationHandler(BaseHandler):
builder=builder
)
event_allowed = await self.third_party_event_rules.check_event_allowed(
event_allowed, _ = await self.third_party_event_rules.check_event_allowed(
event, context
)
if not event_allowed:
@ -2023,7 +2026,7 @@ class FederationHandler(BaseHandler):
# for knock events, we run the third-party event rules. It's not entirely clear
# why we don't do this for other sorts of membership events.
if event.membership == Membership.KNOCK:
event_allowed = await self.third_party_event_rules.check_event_allowed(
event_allowed, _ = await self.third_party_event_rules.check_event_allowed(
event, context
)
if not event_allowed:
@ -2450,14 +2453,14 @@ class FederationHandler(BaseHandler):
state_sets_d = await self.state_store.get_state_groups(
event.room_id, extrem_ids
)
state_sets = list(state_sets_d.values()) # type: List[Iterable[EventBase]]
state_sets: List[Iterable[EventBase]] = list(state_sets_d.values())
state_sets.append(state)
current_states = await self.state_handler.resolve_events(
room_version, state_sets, event
)
current_state_ids = {
current_state_ids: StateMap[str] = {
k: e.event_id for k, e in current_states.items()
} # type: StateMap[str]
}
else:
current_state_ids = await self.state_handler.get_current_state_ids(
event.room_id, latest_event_ids=extrem_ids
@ -2814,7 +2817,7 @@ class FederationHandler(BaseHandler):
"""
# exclude the state key of the new event from the current_state in the context.
if event.is_state():
event_key = (event.type, event.state_key) # type: Optional[Tuple[str, str]]
event_key: Optional[Tuple[str, str]] = (event.type, event.state_key)
else:
event_key = None
state_updates = {
@ -3031,9 +3034,13 @@ class FederationHandler(BaseHandler):
await member_handler.send_membership_event(None, event, context)
else:
destinations = {x.split(":", 1)[-1] for x in (sender_user_id, room_id)}
await self.federation_client.forward_third_party_invite(
destinations, room_id, event_dict
)
try:
await self.federation_client.forward_third_party_invite(
destinations, room_id, event_dict
)
except (RequestSendFailed, HttpResponseException):
raise SynapseError(502, "Failed to forward third party invite")
async def on_exchange_third_party_invite_request(
self, event_dict: JsonDict
@ -3149,7 +3156,7 @@ class FederationHandler(BaseHandler):
logger.debug("Checking auth on event %r", event.content)
last_exception = None # type: Optional[Exception]
last_exception: Optional[Exception] = None
# for each public key in the 3pid invite event
for public_key_object in event_auth.get_public_keys(invite_event):

View file

@ -214,7 +214,7 @@ class GroupsLocalWorkerHandler:
async def bulk_get_publicised_groups(
self, user_ids: Iterable[str], proxy: bool = True
) -> JsonDict:
destinations = {} # type: Dict[str, Set[str]]
destinations: Dict[str, Set[str]] = {}
local_users = set()
for user_id in user_ids:
@ -227,7 +227,7 @@ class GroupsLocalWorkerHandler:
raise SynapseError(400, "Some user_ids are not local")
results = {}
failed_results = [] # type: List[str]
failed_results: List[str] = []
for destination, dest_user_ids in destinations.items():
try:
r = await self.transport_client.bulk_get_publicised_groups(

View file

@ -302,7 +302,7 @@ class IdentityHandler(BaseHandler):
)
url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii")
url_bytes = b"/_matrix/identity/api/v1/3pid/unbind"
content = {
"mxid": mxid,
@ -695,7 +695,7 @@ class IdentityHandler(BaseHandler):
return data["mxid"]
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
except IOError as e:
except OSError as e:
logger.warning("Error from v1 identity server lookup: %s" % (e,))
return None

View file

@ -46,9 +46,17 @@ class InitialSyncHandler(BaseHandler):
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
self.validator = EventValidator()
self.snapshot_cache = ResponseCache(
hs.get_clock(), "initial_sync_cache"
) # type: ResponseCache[Tuple[str, Optional[StreamToken], Optional[StreamToken], str, Optional[int], bool, bool]]
self.snapshot_cache: ResponseCache[
Tuple[
str,
Optional[StreamToken],
Optional[StreamToken],
str,
Optional[int],
bool,
bool,
]
] = ResponseCache(hs.get_clock(), "initial_sync_cache")
self._event_serializer = hs.get_event_client_serializer()
self.storage = hs.get_storage()
self.state_store = self.storage.state

View file

@ -81,7 +81,7 @@ class MessageHandler:
# The scheduled call to self._expire_event. None if no call is currently
# scheduled.
self._scheduled_expiry = None # type: Optional[IDelayedCall]
self._scheduled_expiry: Optional[IDelayedCall] = None
if not hs.config.worker_app:
run_as_background_process(
@ -196,9 +196,7 @@ class MessageHandler:
room_state_events = await self.state_store.get_state_for_events(
[event.event_id], state_filter=state_filter
)
room_state = room_state_events[
event.event_id
] # type: Mapping[Any, EventBase]
room_state: Mapping[Any, EventBase] = room_state_events[event.event_id]
else:
raise AuthError(
403,
@ -423,9 +421,9 @@ class EventCreationHandler:
self.action_generator = hs.get_action_generator()
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = (
self.third_party_event_rules: "ThirdPartyEventRules" = (
self.hs.get_third_party_event_rules()
) # type: ThirdPartyEventRules
)
self._block_events_without_consent_error = (
self.config.block_events_without_consent_error
@ -442,7 +440,7 @@ class EventCreationHandler:
#
# map from room id to time-of-last-attempt.
#
self._rooms_to_exclude_from_dummy_event_insertion = {} # type: Dict[str, int]
self._rooms_to_exclude_from_dummy_event_insertion: Dict[str, int] = {}
# The number of forward extremeities before a dummy event is sent.
self._dummy_events_threshold = hs.config.dummy_events_threshold
@ -467,9 +465,7 @@ class EventCreationHandler:
# Stores the state groups we've recently added to the joined hosts
# external cache. Note that the timeout must be significantly less than
# the TTL on the external cache.
self._external_cache_joined_hosts_updates = (
None
) # type: Optional[ExpiringCache]
self._external_cache_joined_hosts_updates: Optional[ExpiringCache] = None
if self._external_cache.is_enabled():
self._external_cache_joined_hosts_updates = ExpiringCache(
"_external_cache_joined_hosts_updates",
@ -520,6 +516,9 @@ class EventCreationHandler:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@ -774,6 +773,7 @@ class EventCreationHandler:
txn_id: Optional[str] = None,
ignore_shadow_ban: bool = False,
outlier: bool = False,
historical: bool = False,
depth: Optional[int] = None,
) -> Tuple[EventBase, int]:
"""
@ -801,6 +801,9 @@ class EventCreationHandler:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@ -849,6 +852,7 @@ class EventCreationHandler:
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
outlier=outlier,
historical=historical,
depth=depth,
)
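A hypothetical use of the new historical flag, sketched as a helper so it stays self-contained (the create_and_send_nonmember_event name and the requester/room_id arguments are assumptions about the surrounding handler, not confirmed by this hunk):

async def insert_historical_message(event_creation_handler, requester, room_id: str):
    # Sketch only: historical=True marks the event as backfilled and skips
    # the checks described in the docstring above.
    return await event_creation_handler.create_and_send_nonmember_event(
        requester,
        {
            "type": "m.room.message",
            "room_id": room_id,
            "sender": requester.user.to_string(),
            "content": {"msgtype": "m.text", "body": "inserted into the past"},
        },
        historical=True,
    )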
@ -947,10 +951,10 @@ class EventCreationHandler:
if requester:
context.app_service = requester.app_service
third_party_result = await self.third_party_event_rules.check_event_allowed(
res, new_content = await self.third_party_event_rules.check_event_allowed(
event, context
)
if not third_party_result:
if res is False:
logger.info(
"Event %s forbidden by third-party rules",
event,
@ -958,11 +962,11 @@ class EventCreationHandler:
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN
)
elif isinstance(third_party_result, dict):
elif new_content is not None:
# the third-party rules want to replace the event. We'll need to build a new
# event.
event, context = await self._rebuild_event_after_third_party_rules(
third_party_result, event
new_content, event
)
self.validator.validate_new(event, self.config)
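The third-party rules callback now returns a pair instead of a bare bool: (False, None) forbids the event, (True, None) allows it unchanged, and (True, new_content) makes the handler rebuild the event with the replacement content. A sketch of a callback under the new convention (hypothetical event types):

from typing import Optional, Tuple

async def check_event_allowed(event, state_events) -> Tuple[bool, Optional[dict]]:
    if event.type == "com.example.forbidden":
        return False, None  # reject outright
    if event.type == "m.room.message":
        new_content = dict(event.content)
        new_content["com.example.checked"] = True
        return True, new_content  # allow, but replace the content
    return True, None  # allow unchanged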
@ -1294,7 +1298,7 @@ class EventCreationHandler:
# Validate a newly added alias or newly added alt_aliases.
original_alias = None
original_alt_aliases = [] # type: List[str]
original_alt_aliases: List[str] = []
original_event_id = event.unsigned.get("replaces_state")
if original_event_id:
@ -1597,11 +1601,13 @@ class EventCreationHandler:
for k, v in original_event.internal_metadata.get_dict().items():
setattr(builder.internal_metadata, k, v)
# the event type hasn't changed, so there's no point in re-calculating the
# auth events.
# modules can send new state events, so we re-calculate the auth events just in
# case.
prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)
event = await builder.build(
prev_event_ids=original_event.prev_event_ids(),
auth_event_ids=original_event.auth_event_ids(),
prev_event_ids=prev_event_ids,
auth_event_ids=None,
)
# we rebuild the event context, to be on the safe side. If nothing else,

View file

@ -72,26 +72,26 @@ _SESSION_COOKIES = [
(b"oidc_session_no_samesite", b"HttpOnly"),
]
#: A token exchanged from the token endpoint, as per RFC6749 sec 5.1. and
#: OpenID.Core sec 3.1.3.3.
Token = TypedDict(
"Token",
{
"access_token": str,
"token_type": str,
"id_token": Optional[str],
"refresh_token": Optional[str],
"expires_in": int,
"scope": Optional[str],
},
)
class Token(TypedDict):
access_token: str
token_type: str
id_token: Optional[str]
refresh_token: Optional[str]
expires_in: int
scope: Optional[str]
#: A JWK, as per RFC7517 sec 4. The type could be more precise than that, but
#: there is no real point of doing this in our case.
JWK = Dict[str, str]
#: A JWK Set, as per RFC7517 sec 5.
JWKS = TypedDict("JWKS", {"keys": List[JWK]})
class JWKS(TypedDict):
keys: List[JWK]
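The functional TypedDict declarations are rewritten in the equivalent class syntax; both spell the same type, and every key stays required (a value may still be None where Optional is used). A standalone sketch of the equivalence:

from typing import Dict, List, Optional
from typing_extensions import TypedDict  # typing.TypedDict on newer Pythons

JWK = Dict[str, str]

class JWKS(TypedDict):
    keys: List[JWK]

class Token(TypedDict):
    access_token: str
    token_type: str
    id_token: Optional[str]
    refresh_token: Optional[str]
    expires_in: int
    scope: Optional[str]

token: Token = {  # all keys must be present; use total=False for optional keys
    "access_token": "abc",
    "token_type": "Bearer",
    "id_token": None,
    "refresh_token": None,
    "expires_in": 3600,
    "scope": None,
}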
class OidcHandler:
@ -105,9 +105,9 @@ class OidcHandler:
assert provider_confs
self._token_generator = OidcSessionTokenGenerator(hs)
self._providers = {
self._providers: Dict[str, "OidcProvider"] = {
p.idp_id: OidcProvider(hs, self._token_generator, p) for p in provider_confs
} # type: Dict[str, OidcProvider]
}
async def load_metadata(self) -> None:
"""Validate the config and load the metadata from the remote endpoint.
@ -178,7 +178,7 @@ class OidcHandler:
# are two.
for cookie_name, _ in _SESSION_COOKIES:
session = request.getCookie(cookie_name) # type: Optional[bytes]
session: Optional[bytes] = request.getCookie(cookie_name)
if session is not None:
break
else:
@ -255,7 +255,7 @@ class OidcError(Exception):
def __str__(self):
if self.error_description:
return "{}: {}".format(self.error, self.error_description)
return f"{self.error}: {self.error_description}"
return self.error
@ -277,7 +277,7 @@ class OidcProvider:
self._token_generator = token_generator
self._config = provider
self._callback_url = hs.config.oidc_callback_url # type: str
self._callback_url: str = hs.config.oidc_callback_url
# Calculate the prefix for OIDC callback paths based on the public_baseurl.
# We'll insert this into the Path= parameter of any session cookies we set.
@ -290,7 +290,7 @@ class OidcProvider:
self._scopes = provider.scopes
self._user_profile_method = provider.user_profile_method
client_secret = None # type: Union[None, str, JwtClientSecret]
client_secret: Optional[Union[str, JwtClientSecret]] = None
if provider.client_secret:
client_secret = provider.client_secret
elif provider.client_secret_jwt_key:
@ -305,7 +305,7 @@ class OidcProvider:
provider.client_id,
client_secret,
provider.client_auth_method,
) # type: ClientAuth
)
self._client_auth_method = provider.client_auth_method
# cache of metadata for the identity provider (endpoint uris, mostly). This is
@ -324,7 +324,7 @@ class OidcProvider:
self._allow_existing_users = provider.allow_existing_users
self._http_client = hs.get_proxied_http_client()
self._server_name = hs.config.server_name # type: str
self._server_name: str = hs.config.server_name
# identifier for the external_ids table
self.idp_id = provider.idp_id
@ -639,7 +639,7 @@ class OidcProvider:
)
logger.warning(description)
# Body was still valid JSON. Might be useful to log it for debugging.
logger.warning("Code exchange response: {resp!r}".format(resp=resp))
logger.warning("Code exchange response: %r", resp)
raise OidcError("server_error", description)
return resp
@ -1217,10 +1217,12 @@ class OidcSessionData:
ui_auth_session_id = attr.ib(type=str)
UserAttributeDict = TypedDict(
"UserAttributeDict",
{"localpart": Optional[str], "display_name": Optional[str], "emails": List[str]},
)
class UserAttributeDict(TypedDict):
localpart: Optional[str]
display_name: Optional[str]
emails: List[str]
C = TypeVar("C")
@ -1381,7 +1383,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
if display_name == "":
display_name = None
emails = [] # type: List[str]
emails: List[str] = []
email = render_template_field(self._config.email_template)
if email:
emails.append(email)
@ -1391,7 +1393,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
)
async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict:
extras = {} # type: Dict[str, str]
extras: Dict[str, str] = {}
for key, template in self._config.extra_attributes.items():
try:
extras[key] = template.render(user=userinfo).strip()

View file

@ -81,9 +81,9 @@ class PaginationHandler:
self._server_name = hs.hostname
self.pagination_lock = ReadWriteLock()
self._purges_in_progress_by_room = set() # type: Set[str]
self._purges_in_progress_by_room: Set[str] = set()
# map from purge id to PurgeStatus
self._purges_by_id = {} # type: Dict[str, PurgeStatus]
self._purges_by_id: Dict[str, PurgeStatus] = {}
self._event_serializer = hs.get_event_client_serializer()
self._retention_default_max_lifetime = hs.config.retention_default_max_lifetime

View file

@ -378,14 +378,14 @@ class WorkerPresenceHandler(BasePresenceHandler):
# The number of ongoing syncs on this process, by user id.
# Empty if _presence_enabled is false.
self._user_to_num_current_syncs = {} # type: Dict[str, int]
self._user_to_num_current_syncs: Dict[str, int] = {}
self.notifier = hs.get_notifier()
self.instance_id = hs.get_instance_id()
# user_id -> last_sync_ms. Lists the users that have stopped syncing but
# we haven't notified the presence writer of that yet
self.users_going_offline = {} # type: Dict[str, int]
self.users_going_offline: Dict[str, int] = {}
self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs)
self._set_state_client = ReplicationPresenceSetState.make_client(hs)
@ -650,7 +650,7 @@ class PresenceHandler(BasePresenceHandler):
# Set of users who have presence in the `user_to_current_state` that
# have not yet been persisted
self.unpersisted_users_changes = set() # type: Set[str]
self.unpersisted_users_changes: Set[str] = set()
hs.get_reactor().addSystemEventTrigger(
"before",
@ -664,7 +664,7 @@ class PresenceHandler(BasePresenceHandler):
# Keeps track of the number of *ongoing* syncs on this process. While
# this is non zero a user will never go offline.
self.user_to_num_current_syncs = {} # type: Dict[str, int]
self.user_to_num_current_syncs: Dict[str, int] = {}
# Keeps track of the number of *ongoing* syncs on other processes.
# While any sync is ongoing on another process the user will never
@ -674,8 +674,8 @@ class PresenceHandler(BasePresenceHandler):
# we assume that all the sync requests on that process have stopped.
# Stored as a dict from process_id to set of user_id, and a dict of
# process_id to millisecond timestamp last updated.
self.external_process_to_current_syncs = {} # type: Dict[str, Set[str]]
self.external_process_last_updated_ms = {} # type: Dict[str, int]
self.external_process_to_current_syncs: Dict[str, Set[str]] = {}
self.external_process_last_updated_ms: Dict[str, int] = {}
self.external_sync_linearizer = Linearizer(name="external_sync_linearizer")
@ -1581,9 +1581,7 @@ class PresenceEventSource:
# The set of users that we're interested in and that have had a presence update.
# We'll actually pull the presence updates for these users at the end.
interested_and_updated_users = (
set()
) # type: Union[Set[str], FrozenSet[str]]
interested_and_updated_users: Union[Set[str], FrozenSet[str]] = set()
if from_key:
# First get all users that have had a presence update
@ -1950,8 +1948,8 @@ async def get_interested_parties(
A 2-tuple of `(room_ids_to_states, users_to_states)`,
with each item being a dict of `entity_name` -> `[UserPresenceState]`
"""
room_ids_to_states = {} # type: Dict[str, List[UserPresenceState]]
users_to_states = {} # type: Dict[str, List[UserPresenceState]]
room_ids_to_states: Dict[str, List[UserPresenceState]] = {}
users_to_states: Dict[str, List[UserPresenceState]] = {}
for state in states:
room_ids = await store.get_rooms_for_user(state.user_id)
for room_id in room_ids:
@ -2063,12 +2061,12 @@ class PresenceFederationQueue:
# stream_id, destinations, user_ids)`. We don't store the full states
# for efficiency, and remote workers will already have the full states
# cached.
self._queue = [] # type: List[Tuple[int, int, Collection[str], Set[str]]]
self._queue: List[Tuple[int, int, Collection[str], Set[str]]] = []
self._next_id = 1
# Map from instance name to current token
self._current_tokens = {} # type: Dict[str, int]
self._current_tokens: Dict[str, int] = {}
if self._queue_presence_updates:
self._clock.looping_call(self._clear_queue, self._CLEAR_ITEMS_EVERY_MS)
@ -2168,7 +2166,7 @@ class PresenceFederationQueue:
# handle the case where `from_token` stream ID has already been dropped.
start_idx = max(from_token + 1 - self._next_id, -len(self._queue))
to_send = [] # type: List[Tuple[int, Tuple[str, str]]]
to_send: List[Tuple[int, Tuple[str, str]]] = []
limited = False
new_id = upto_token
for _, stream_id, destinations, user_ids in self._queue[start_idx:]:
@ -2216,7 +2214,7 @@ class PresenceFederationQueue:
if not self._federation:
return
hosts_to_users = {} # type: Dict[str, Set[str]]
hosts_to_users: Dict[str, Set[str]] = {}
for row in rows:
hosts_to_users.setdefault(row.destination, set()).add(row.user_id)

View file

@ -197,7 +197,7 @@ class ProfileHandler(BaseHandler):
400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,)
)
displayname_to_set = new_displayname # type: Optional[str]
displayname_to_set: Optional[str] = new_displayname
if new_displayname == "":
displayname_to_set = None
@ -286,7 +286,7 @@ class ProfileHandler(BaseHandler):
400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,)
)
avatar_url_to_set = new_avatar_url # type: Optional[str]
avatar_url_to_set: Optional[str] = new_avatar_url
if new_avatar_url == "":
avatar_url_to_set = None

View file

@ -30,6 +30,8 @@ class ReceiptsHandler(BaseHandler):
self.server_name = hs.config.server_name
self.store = hs.get_datastore()
self.event_auth_handler = hs.get_event_auth_handler()
self.hs = hs
# We only need to poke the federation sender explicitly if its on the
@ -59,6 +61,19 @@ class ReceiptsHandler(BaseHandler):
"""Called when we receive an EDU of type m.receipt from a remote HS."""
receipts = []
for room_id, room_values in content.items():
# If we're not in the room just ditch the event entirely. This is
# probably an old server that has come back and thinks we're still in
# the room (or we've been rejoined to the room by a state reset).
is_in_room = await self.event_auth_handler.check_host_in_room(
room_id, self.server_name
)
if not is_in_room:
logger.info(
"Ignoring receipt from %s as we're not in the room",
origin,
)
continue
for receipt_type, users in room_values.items():
for user_id, user_values in users.items():
if get_domain_from_id(user_id) != origin:
@ -83,8 +98,8 @@ class ReceiptsHandler(BaseHandler):
async def _handle_new_receipts(self, receipts: List[ReadReceipt]) -> bool:
"""Takes a list of receipts, stores them and informs the notifier."""
min_batch_id = None # type: Optional[int]
max_batch_id = None # type: Optional[int]
min_batch_id: Optional[int] = None
max_batch_id: Optional[int] = None
for receipt in receipts:
res = await self.store.insert_receipt(

View file

@ -55,15 +55,12 @@ login_counter = Counter(
["guest", "auth_provider"],
)
LoginDict = TypedDict(
"LoginDict",
{
"device_id": str,
"access_token": str,
"valid_until_ms": Optional[int],
"refresh_token": Optional[str],
},
)
class LoginDict(TypedDict):
device_id: str
access_token: str
valid_until_ms: Optional[int]
refresh_token: Optional[str]
class RegistrationHandler(BaseHandler):
@ -77,6 +74,7 @@ class RegistrationHandler(BaseHandler):
self.identity_handler = self.hs.get_identity_handler()
self.ratelimiter = hs.get_registration_ratelimiter()
self.macaroon_gen = hs.get_macaroon_generator()
self._account_validity_handler = hs.get_account_validity_handler()
self._server_notices_mxid = hs.config.server_notices_mxid
self._server_name = hs.hostname
@ -707,6 +705,10 @@ class RegistrationHandler(BaseHandler):
shadow_banned=shadow_banned,
)
# Only call the account validity module(s) on the main process, to avoid
# repeating e.g. database writes on all of the workers.
await self._account_validity_handler.on_user_registration(user_id)
async def register_device(
self,
user_id: str,

View file

@ -87,7 +87,7 @@ class RoomCreationHandler(BaseHandler):
self.config = hs.config
# Room state based off defined presets
self._presets_dict = {
self._presets_dict: Dict[str, Dict[str, Any]] = {
RoomCreationPreset.PRIVATE_CHAT: {
"join_rules": JoinRules.INVITE,
"history_visibility": HistoryVisibility.SHARED,
@ -109,7 +109,7 @@ class RoomCreationHandler(BaseHandler):
"guest_can_join": False,
"power_level_content_override": {},
},
} # type: Dict[str, Dict[str, Any]]
}
# Modify presets to selectively enable encryption by default per homeserver config
for preset_name, preset_config in self._presets_dict.items():
@ -127,9 +127,9 @@ class RoomCreationHandler(BaseHandler):
# If a user tries to update the same room multiple times in quick
# succession, only process the first attempt and return its result to
# subsequent requests
self._upgrade_response_cache = ResponseCache(
self._upgrade_response_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
hs.get_clock(), "room_upgrade", timeout_ms=FIVE_MINUTES_IN_MS
) # type: ResponseCache[Tuple[str, str]]
)
self._server_notices_mxid = hs.config.server_notices_mxid
self.third_party_event_rules = hs.get_third_party_event_rules()
@ -377,10 +377,10 @@ class RoomCreationHandler(BaseHandler):
if not await self.spam_checker.user_may_create_room(user_id):
raise SynapseError(403, "You are not permitted to create rooms")
creation_content = {
creation_content: JsonDict = {
"room_version": new_room_version.identifier,
"predecessor": {"room_id": old_room_id, "event_id": tombstone_event_id},
} # type: JsonDict
}
# Check if old room was non-federatable
@ -618,15 +618,11 @@ class RoomCreationHandler(BaseHandler):
else:
is_requester_admin = await self.auth.is_server_admin(requester.user)
# Check whether the third party rules allows/changes the room create
# request.
event_allowed = await self.third_party_event_rules.on_create_room(
# Let the third party rules modify the room creation config if needed, or abort
# the room creation entirely with an exception.
await self.third_party_event_rules.on_create_room(
requester, config, is_requester_admin=is_requester_admin
)
if not event_allowed:
raise SynapseError(
403, "You are not permitted to create rooms", Codes.FORBIDDEN
)
if not is_requester_admin and not await self.spam_checker.user_may_create_room(
user_id
@ -946,7 +942,7 @@ class RoomCreationHandler(BaseHandler):
etype=EventTypes.PowerLevels, content=pl_content
)
else:
power_level_content = {
power_level_content: JsonDict = {
"users": {creator_id: 9001},
"users_default": 0,
"events": {
@ -965,7 +961,7 @@ class RoomCreationHandler(BaseHandler):
"kick": 50,
"redact": 50,
"invite": 50,
} # type: JsonDict
}
if config["original_invitees_have_ops"]:
for invitee in invite_list:

View file

@ -20,7 +20,12 @@ import msgpack
from unpaddedbase64 import decode_base64, encode_base64
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules
from synapse.api.errors import Codes, HttpResponseException
from synapse.api.errors import (
Codes,
HttpResponseException,
RequestSendFailed,
SynapseError,
)
from synapse.types import JsonDict, ThirdPartyInstanceID
from synapse.util.caches.descriptors import cached
from synapse.util.caches.response_cache import ResponseCache
@ -42,12 +47,12 @@ class RoomListHandler(BaseHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.enable_room_list_search = hs.config.enable_room_list_search
self.response_cache = ResponseCache(
hs.get_clock(), "room_list"
) # type: ResponseCache[Tuple[Optional[int], Optional[str], Optional[ThirdPartyInstanceID]]]
self.remote_response_cache = ResponseCache(
hs.get_clock(), "remote_room_list", timeout_ms=30 * 1000
) # type: ResponseCache[Tuple[str, Optional[int], Optional[str], bool, Optional[str]]]
self.response_cache: ResponseCache[
Tuple[Optional[int], Optional[str], Optional[ThirdPartyInstanceID]]
] = ResponseCache(hs.get_clock(), "room_list")
self.remote_response_cache: ResponseCache[
Tuple[str, Optional[int], Optional[str], bool, Optional[str]]
] = ResponseCache(hs.get_clock(), "remote_room_list", timeout_ms=30 * 1000)
async def get_local_public_room_list(
self,
@ -134,10 +139,10 @@ class RoomListHandler(BaseHandler):
if since_token:
batch_token = RoomListNextBatch.from_token(since_token)
bounds = (
bounds: Optional[Tuple[int, str]] = (
batch_token.last_joined_members,
batch_token.last_room_id,
) # type: Optional[Tuple[int, str]]
)
forwards = batch_token.direction_is_forward
has_batch_token = True
else:
@ -177,7 +182,7 @@ class RoomListHandler(BaseHandler):
results = [build_room_entry(r) for r in results]
response = {} # type: JsonDict
response: JsonDict = {}
num_results = len(results)
if limit is not None:
more_to_come = num_results == probing_limit
@ -378,7 +383,11 @@ class RoomListHandler(BaseHandler):
):
logger.debug("Falling back to locally-filtered /publicRooms")
else:
raise # Not an error that should trigger a fallback.
# Not an error that should trigger a fallback.
raise SynapseError(502, "Failed to fetch room list")
except RequestSendFailed:
# Not an error that should trigger a fallback.
raise SynapseError(502, "Failed to fetch room list")
# if we reach this point, then we fall back to the situation where
# we currently don't support searching across federation, so we have
@ -417,14 +426,17 @@ class RoomListHandler(BaseHandler):
repl_layer = self.hs.get_federation_client()
if search_filter:
# We can't cache when asking for search
return await repl_layer.get_public_rooms(
server_name,
limit=limit,
since_token=since_token,
search_filter=search_filter,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
)
try:
return await repl_layer.get_public_rooms(
server_name,
limit=limit,
since_token=since_token,
search_filter=search_filter,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
)
except (RequestSendFailed, HttpResponseException):
raise SynapseError(502, "Failed to fetch room list")
key = (
server_name,

View file

@ -83,7 +83,7 @@ class SamlHandler(BaseHandler):
self.unstable_idp_brand = None
# a map from saml session id to Saml2SessionData object
self._outstanding_requests_dict = {} # type: Dict[str, Saml2SessionData]
self._outstanding_requests_dict: Dict[str, Saml2SessionData] = {}
self._sso_handler = hs.get_sso_handler()
self._sso_handler.register_identity_provider(self)
@ -372,7 +372,7 @@ class SamlHandler(BaseHandler):
DOT_REPLACE_PATTERN = re.compile(
("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),))
"[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),)
)
@ -386,10 +386,10 @@ def dot_replace_for_mxid(username: str) -> str:
return username
MXID_MAPPER_MAP = {
MXID_MAPPER_MAP: Dict[str, Callable[[str], str]] = {
"hexencode": map_username_to_mxid_localpart,
"dotreplace": dot_replace_for_mxid,
} # type: Dict[str, Callable[[str], str]]
}
@attr.s

View file

@ -192,7 +192,7 @@ class SearchHandler(BaseHandler):
# If doing a subset of all rooms seearch, check if any of the rooms
# are from an upgraded room, and search their contents as well
if search_filter.rooms:
historical_room_ids = [] # type: List[str]
historical_room_ids: List[str] = []
for room_id in search_filter.rooms:
# Add any previous rooms to the search if they exist
ids = await self.get_old_rooms_from_upgraded_room(room_id)
@ -216,9 +216,9 @@ class SearchHandler(BaseHandler):
rank_map = {} # event_id -> rank of event
allowed_events = []
# Holds result of grouping by room, if applicable
room_groups = {} # type: Dict[str, JsonDict]
room_groups: Dict[str, JsonDict] = {}
# Holds result of grouping by sender, if applicable
sender_group = {} # type: Dict[str, JsonDict]
sender_group: Dict[str, JsonDict] = {}
# Holds the next_batch for the entire result set if one of those exists
global_next_batch = None
@ -262,7 +262,7 @@ class SearchHandler(BaseHandler):
s["results"].append(e.event_id)
elif order_by == "recent":
room_events = [] # type: List[EventBase]
room_events: List[EventBase] = []
i = 0
pagination_token = batch_token

View file

@ -24,6 +24,7 @@ from synapse.api.constants import (
EventContentFields,
EventTypes,
HistoryVisibility,
JoinRules,
Membership,
RoomTypes,
)
@ -89,14 +90,14 @@ class SpaceSummaryHandler:
room_queue = deque((_RoomQueueEntry(room_id, ()),))
# rooms we have already processed
processed_rooms = set() # type: Set[str]
processed_rooms: Set[str] = set()
# events we have already processed. We don't necessarily have their event ids,
# so instead we key on (room id, state key)
processed_events = set() # type: Set[Tuple[str, str]]
processed_events: Set[Tuple[str, str]] = set()
rooms_result = [] # type: List[JsonDict]
events_result = [] # type: List[JsonDict]
rooms_result: List[JsonDict] = []
events_result: List[JsonDict] = []
while room_queue and len(rooms_result) < MAX_ROOMS:
queue_entry = room_queue.popleft()
@ -150,14 +151,21 @@ class SpaceSummaryHandler:
# The room should only be included in the summary if:
# a. the user is in the room;
# b. the room is world readable; or
# c. the user is in a space that has been granted access to
# the room.
# c. the user could join the room, e.g. the join rules
# are set to public or the user is in a space that
# has been granted access to the room.
#
# Note that we know the user is not in the root room (which is
# why the remote call was made in the first place), but the user
# could be in one of the children rooms and we just didn't know
# about the link.
include_room = room.get("world_readable") is True
# The API doesn't return the room version so assume that a
# join rule of knock is valid.
include_room = (
room.get("join_rules") in (JoinRules.PUBLIC, JoinRules.KNOCK)
or room.get("world_readable") is True
)
# Check if the user is a member of any of the allowed spaces
# from the response.
@ -264,10 +272,10 @@ class SpaceSummaryHandler:
# the set of rooms that we should not walk further. Initialise it with the
# excluded-rooms list; we will add other rooms as we process them so that
# we do not loop.
processed_rooms = set(exclude_rooms) # type: Set[str]
processed_rooms: Set[str] = set(exclude_rooms)
rooms_result = [] # type: List[JsonDict]
events_result = [] # type: List[JsonDict]
rooms_result: List[JsonDict] = []
events_result: List[JsonDict] = []
while room_queue and len(rooms_result) < MAX_ROOMS:
room_id = room_queue.popleft()
@ -345,7 +353,7 @@ class SpaceSummaryHandler:
max_children = MAX_ROOMS_PER_SPACE
now = self._clock.time_msec()
events_result = [] # type: List[JsonDict]
events_result: List[JsonDict] = []
for edge_event in itertools.islice(child_events, max_children):
events_result.append(
await self._event_serializer.serialize_event(
@ -420,9 +428,8 @@ class SpaceSummaryHandler:
It should be included if:
* The requester is joined or invited to the room.
* The requester can join without an invite (per MSC3083).
* The origin server has any user that is joined or invited to the room.
* The requester is joined or can join the room (per MSC3173).
* The origin server has any user that is joined or can join the room.
* The history visibility is set to world readable.
Args:
@ -441,13 +448,39 @@ class SpaceSummaryHandler:
# If there's no state for the room, it isn't known.
if not state_ids:
# The user might have a pending invite for the room.
if requester and await self._store.get_invite_for_local_user_in_room(
requester, room_id
):
return True
logger.info("room %s is unknown, omitting from summary", room_id)
return False
room_version = await self._store.get_room_version(room_id)
# if we have an authenticated requesting user, first check if they are able to view
# stripped state in the room.
# Include the room if it has join rules of public or knock.
join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""))
if join_rules_event_id:
join_rules_event = await self._store.get_event(join_rules_event_id)
join_rule = join_rules_event.content.get("join_rule")
if join_rule == JoinRules.PUBLIC or (
room_version.msc2403_knocking and join_rule == JoinRules.KNOCK
):
return True
# Include the room if it is peekable.
hist_vis_event_id = state_ids.get((EventTypes.RoomHistoryVisibility, ""))
if hist_vis_event_id:
hist_vis_ev = await self._store.get_event(hist_vis_event_id)
hist_vis = hist_vis_ev.content.get("history_visibility")
if hist_vis == HistoryVisibility.WORLD_READABLE:
return True
# Otherwise we need to check information specific to the user or server.
# If we have an authenticated requesting user, check if they are a member
# of the room (or can join the room).
if requester:
member_event_id = state_ids.get((EventTypes.Member, requester), None)
@ -470,9 +503,11 @@ class SpaceSummaryHandler:
return True
# If this is a request over federation, check if the host is in the room or
# is in one of the spaces specified via the join rules.
# has a user who could join the room.
elif origin:
if await self._event_auth_handler.check_host_in_room(room_id, origin):
if await self._event_auth_handler.check_host_in_room(
room_id, origin
) or await self._store.is_host_invited(room_id, origin):
return True
# Alternately, if the host has a user in any of the spaces specified
@ -490,18 +525,10 @@ class SpaceSummaryHandler:
):
return True
# otherwise, check if the room is peekable
hist_vis_event_id = state_ids.get((EventTypes.RoomHistoryVisibility, ""), None)
if hist_vis_event_id:
hist_vis_ev = await self._store.get_event(hist_vis_event_id)
hist_vis = hist_vis_ev.content.get("history_visibility")
if hist_vis == HistoryVisibility.WORLD_READABLE:
return True
logger.info(
"room %s is unpeekable and user %s is not a member / not allowed to join, omitting from summary",
"room %s is unpeekable and requester %s is not a member / not allowed to join, omitting from summary",
room_id,
requester,
requester or origin,
)
return False
@ -535,6 +562,7 @@ class SpaceSummaryHandler:
"canonical_alias": stats["canonical_alias"],
"num_joined_members": stats["joined_members"],
"avatar_url": stats["avatar"],
"join_rules": stats["join_rules"],
"world_readable": (
stats["history_visibility"] == HistoryVisibility.WORLD_READABLE
),

View file

@ -202,10 +202,10 @@ class SsoHandler:
self._mapping_lock = Linearizer(name="sso_user_mapping", clock=hs.get_clock())
# a map from session id to session data
self._username_mapping_sessions = {} # type: Dict[str, UsernameMappingSession]
self._username_mapping_sessions: Dict[str, UsernameMappingSession] = {}
# map from idp_id to SsoIdentityProvider
self._identity_providers = {} # type: Dict[str, SsoIdentityProvider]
self._identity_providers: Dict[str, SsoIdentityProvider] = {}
self._consent_at_registration = hs.config.consent.user_consent_at_registration
@ -296,7 +296,7 @@ class SsoHandler:
)
# if the client chose an IdP, use that
idp = None # type: Optional[SsoIdentityProvider]
idp: Optional[SsoIdentityProvider] = None
if idp_id:
idp = self._identity_providers.get(idp_id)
if not idp:
@ -669,9 +669,9 @@ class SsoHandler:
remote_user_id,
)
user_id_to_verify = await self._auth_handler.get_session_data(
user_id_to_verify: str = await self._auth_handler.get_session_data(
ui_auth_session_id, UIAuthSessionDataConstants.REQUEST_USER_ID
) # type: str
)
if not user_id:
logger.warning(
@ -793,7 +793,7 @@ class SsoHandler:
session.use_display_name = use_display_name
emails_from_idp = set(session.emails)
filtered_emails = set() # type: Set[str]
filtered_emails: Set[str] = set()
# we iterate through the list rather than just building a set conjunction, so
# that we can log attempts to use unknown addresses

View file

@ -45,12 +45,11 @@ class StatsHandler:
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
self.is_mine_id = hs.is_mine_id
self.stats_bucket_size = hs.config.stats_bucket_size
self.stats_enabled = hs.config.stats_enabled
# The current position in the current_state_delta stream
self.pos = None # type: Optional[int]
self.pos: Optional[int] = None
# Guard to ensure we only process deltas one at a time
self._is_processing = False
@ -106,20 +105,6 @@ class StatsHandler:
room_deltas = {}
user_deltas = {}
# Then count deltas for total_events and total_event_bytes.
(
room_count,
user_count,
) = await self.store.get_changes_room_total_events_and_bytes(
self.pos, max_pos
)
for room_id, fields in room_count.items():
room_deltas.setdefault(room_id, Counter()).update(fields)
for user_id, fields in user_count.items():
user_deltas.setdefault(user_id, Counter()).update(fields)
logger.debug("room_deltas: %s", room_deltas)
logger.debug("user_deltas: %s", user_deltas)
@ -146,10 +131,10 @@ class StatsHandler:
mapping from room/user ID to changes in the various fields.
"""
room_to_stats_deltas = {} # type: Dict[str, CounterType[str]]
user_to_stats_deltas = {} # type: Dict[str, CounterType[str]]
room_to_stats_deltas: Dict[str, CounterType[str]] = {}
user_to_stats_deltas: Dict[str, CounterType[str]] = {}
room_to_state_updates = {} # type: Dict[str, Dict[str, Any]]
room_to_state_updates: Dict[str, Dict[str, Any]] = {}
for delta in deltas:
typ = delta["type"]
@ -179,14 +164,12 @@ class StatsHandler:
)
continue
event_content = {} # type: JsonDict
event_content: JsonDict = {}
sender = None
if event_id is not None:
event = await self.store.get_event(event_id, allow_none=True)
if event:
event_content = event.content or {}
sender = event.sender
# All the values in this dict are deltas (RELATIVE changes)
room_stats_delta = room_to_stats_deltas.setdefault(room_id, Counter())
@ -244,12 +227,6 @@ class StatsHandler:
room_stats_delta["joined_members"] += 1
elif membership == Membership.INVITE:
room_stats_delta["invited_members"] += 1
if sender and self.is_mine_id(sender):
user_to_stats_deltas.setdefault(sender, Counter())[
"invites_sent"
] += 1
elif membership == Membership.LEAVE:
room_stats_delta["left_members"] += 1
elif membership == Membership.BAN:
@ -279,10 +256,6 @@ class StatsHandler:
room_state["is_federatable"] = (
event_content.get("m.federate", True) is True
)
if sender and self.is_mine_id(sender):
user_to_stats_deltas.setdefault(sender, Counter())[
"rooms_created"
] += 1
elif typ == EventTypes.JoinRules:
room_state["join_rules"] = event_content.get("join_rule")
elif typ == EventTypes.RoomHistoryVisibility:

View file

@ -278,12 +278,14 @@ class SyncHandler:
self.state_store = self.storage.state
# ExpiringCache((User, Device)) -> LruCache(user_id => event_id)
self.lazy_loaded_members_cache = ExpiringCache(
self.lazy_loaded_members_cache: ExpiringCache[
Tuple[str, Optional[str]], LruCache[str, str]
] = ExpiringCache(
"lazy_loaded_members_cache",
self.clock,
max_len=0,
expiry_ms=LAZY_LOADED_MEMBERS_CACHE_MAX_AGE,
) # type: ExpiringCache[Tuple[str, Optional[str]], LruCache[str, str]]
)
async def wait_for_sync_for_user(
self,
@ -440,7 +442,7 @@ class SyncHandler:
)
now_token = now_token.copy_and_replace("typing_key", typing_key)
ephemeral_by_room = {} # type: JsonDict
ephemeral_by_room: JsonDict = {}
for event in typing:
# we want to exclude the room_id from the event, but modifying the
@ -502,7 +504,7 @@ class SyncHandler:
# We check if there are any state events, if there are then we pass
# all current state events to the filter_events function. This is to
# ensure that we always include current state in the timeline
current_state_ids = frozenset() # type: FrozenSet[str]
current_state_ids: FrozenSet[str] = frozenset()
if any(e.is_state() for e in recents):
current_state_ids_map = await self.store.get_current_state_ids(
room_id
@ -783,9 +785,9 @@ class SyncHandler:
def get_lazy_loaded_members_cache(
self, cache_key: Tuple[str, Optional[str]]
) -> LruCache[str, str]:
cache = self.lazy_loaded_members_cache.get(
cache: Optional[LruCache[str, str]] = self.lazy_loaded_members_cache.get(
cache_key
) # type: Optional[LruCache[str, str]]
)
if cache is None:
logger.debug("creating LruCache for %r", cache_key)
cache = LruCache(LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE)
@ -984,7 +986,7 @@ class SyncHandler:
if t[0] == EventTypes.Member:
cache.set(t[1], event_id)
state = {} # type: Dict[str, EventBase]
state: Dict[str, EventBase] = {}
if state_ids:
state = await self.store.get_events(list(state_ids.values()))
@ -1087,8 +1089,8 @@ class SyncHandler:
logger.debug("Fetching OTK data")
device_id = sync_config.device_id
one_time_key_counts = {} # type: JsonDict
unused_fallback_key_types = [] # type: List[str]
one_time_key_counts: JsonDict = {}
unused_fallback_key_types: List[str] = []
if device_id:
one_time_key_counts = await self.store.count_e2e_one_time_keys(
user_id, device_id
@ -1436,7 +1438,7 @@ class SyncHandler:
)
if block_all_room_ephemeral:
ephemeral_by_room = {} # type: Dict[str, List[JsonDict]]
ephemeral_by_room: Dict[str, List[JsonDict]] = {}
else:
now_token, ephemeral_by_room = await self.ephemeral_by_room(
sync_result_builder,
@ -1467,7 +1469,7 @@ class SyncHandler:
# If there is ignored users account data and it matches the proper type,
# then use it.
ignored_users = frozenset() # type: FrozenSet[str]
ignored_users: FrozenSet[str] = frozenset()
if ignored_account_data:
ignored_users_data = ignored_account_data.get("ignored_users", {})
if isinstance(ignored_users_data, dict):
@ -1585,7 +1587,7 @@ class SyncHandler:
user_id, since_token.room_key, now_token.room_key
)
mem_change_events_by_room_id = {} # type: Dict[str, List[EventBase]]
mem_change_events_by_room_id: Dict[str, List[EventBase]] = {}
for event in rooms_changed:
mem_change_events_by_room_id.setdefault(event.room_id, []).append(event)
@ -1598,7 +1600,7 @@ class SyncHandler:
logger.debug(
"Membership changes in %s: [%s]",
room_id,
", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)),
", ".join("%s (%s)" % (e.event_id, e.membership) for e in events),
)
non_joins = [e for e in events if e.membership != Membership.JOIN]
@ -1721,7 +1723,7 @@ class SyncHandler:
# This is all screaming out for a refactor, as the logic here is
# subtle and the moving parts numerous.
if leave_event.internal_metadata.is_out_of_band_membership():
batch_events = [leave_event] # type: Optional[List[EventBase]]
batch_events: Optional[List[EventBase]] = [leave_event]
else:
batch_events = None
@ -1970,7 +1972,7 @@ class SyncHandler:
room_id, batch, sync_config, since_token, now_token, full_state=full_state
)
summary = {} # type: Optional[JsonDict]
summary: Optional[JsonDict] = {}
# we include a summary in room responses when we're lazy loading
# members (as the client otherwise doesn't have enough info to form
@ -1994,7 +1996,7 @@ class SyncHandler:
)
if room_builder.rtype == "joined":
unread_notifications = {} # type: Dict[str, int]
unread_notifications: Dict[str, int] = {}
room_sync = JoinedSyncResult(
room_id=room_id,
timeline=batch,

View file

@ -68,11 +68,11 @@ class FollowerTypingHandler:
)
# map room IDs to serial numbers
self._room_serials = {} # type: Dict[str, int]
self._room_serials: Dict[str, int] = {}
# map room IDs to sets of users currently typing
self._room_typing = {} # type: Dict[str, Set[str]]
self._room_typing: Dict[str, Set[str]] = {}
self._member_last_federation_poke = {} # type: Dict[RoomMember, int]
self._member_last_federation_poke: Dict[RoomMember, int] = {}
self.wheel_timer = WheelTimer(bucket_size=5000)
self._latest_room_serial = 0
@ -208,6 +208,7 @@ class TypingWriterHandler(FollowerTypingHandler):
self.auth = hs.get_auth()
self.notifier = hs.get_notifier()
self.event_auth_handler = hs.get_event_auth_handler()
self.hs = hs
@ -216,7 +217,7 @@ class TypingWriterHandler(FollowerTypingHandler):
hs.get_distributor().observe("user_left_room", self.user_left_room)
# clock time we expect to stop
self._member_typing_until = {} # type: Dict[RoomMember, int]
self._member_typing_until: Dict[RoomMember, int] = {}
# caches which room_ids changed at which serials
self._typing_stream_change_cache = StreamChangeCache(
@ -326,6 +327,19 @@ class TypingWriterHandler(FollowerTypingHandler):
room_id = content["room_id"]
user_id = content["user_id"]
# If we're not in the room just ditch the event entirely. This is
# probably an old server that has come back and thinks we're still in
# the room (or we've been rejoined to the room by a state reset).
is_in_room = await self.event_auth_handler.check_host_in_room(
room_id, self.server_name
)
if not is_in_room:
logger.info(
"Ignoring typing update from %s as we're not in the room",
origin,
)
return
member = RoomMember(user_id=user_id, room_id=room_id)
# Check that the string is a valid user id
@ -391,9 +405,9 @@ class TypingWriterHandler(FollowerTypingHandler):
if last_id == current_id:
return [], current_id, False
changed_rooms = self._typing_stream_change_cache.get_all_entities_changed(
last_id
) # type: Optional[Iterable[str]]
changed_rooms: Optional[
Iterable[str]
] = self._typing_stream_change_cache.get_all_entities_changed(last_id)
if changed_rooms is None:
changed_rooms = self._room_serials

View file

@ -52,7 +52,7 @@ class UserDirectoryHandler(StateDeltasHandler):
self.search_all_users = hs.config.user_directory_search_all_users
self.spam_checker = hs.get_spam_checker()
# The current position in the current_state_delta stream
self.pos = None # type: Optional[int]
self.pos: Optional[int] = None
# Guard to ensure we only process deltas one at a time
self._is_processing = False

View file

@ -69,7 +69,7 @@ def _get_requested_host(request: IRequest) -> bytes:
return hostname
# no Host header, use the address/port that the request arrived on
host = request.getHost() # type: Union[address.IPv4Address, address.IPv6Address]
host: Union[address.IPv4Address, address.IPv6Address] = request.getHost()
hostname = host.host.encode("ascii")

View file

@ -160,7 +160,7 @@ class _IPBlacklistingResolver:
def resolveHostName(
self, recv: IResolutionReceiver, hostname: str, portNumber: int = 0
) -> IResolutionReceiver:
addresses = [] # type: List[IAddress]
addresses: List[IAddress] = []
def _callback() -> None:
has_bad_ip = False
@ -334,9 +334,9 @@ class SimpleHttpClient:
if self._ip_blacklist:
# If we have an IP blacklist, we need to use a DNS resolver which
# filters out blacklisted IP addresses, to prevent DNS rebinding.
self.reactor = BlacklistingReactorWrapper(
self.reactor: ISynapseReactor = BlacklistingReactorWrapper(
hs.get_reactor(), self._ip_whitelist, self._ip_blacklist
) # type: ISynapseReactor
)
else:
self.reactor = hs.get_reactor()
@ -350,14 +350,14 @@ class SimpleHttpClient:
pool.maxPersistentPerHost = max((100 * hs.config.caches.global_factor, 5))
pool.cachedConnectionTimeout = 2 * 60
self.agent = ProxyAgent(
self.agent: IAgent = ProxyAgent(
self.reactor,
hs.get_reactor(),
connectTimeout=15,
contextFactory=self.hs.get_http_client_context_factory(),
pool=pool,
use_proxy=use_proxy,
) # type: IAgent
)
if self._ip_blacklist:
# If we have an IP blacklist, we then install the blacklisting Agent
@ -412,7 +412,7 @@ class SimpleHttpClient:
cooperator=self._cooperator,
)
request_deferred = treq.request(
request_deferred: defer.Deferred = treq.request(
method,
uri,
agent=self.agent,
@ -422,7 +422,7 @@ class SimpleHttpClient:
# response bodies.
unbuffered=True,
**self._extra_treq_args,
) # type: defer.Deferred
)
# we use our own timeout mechanism rather than treq's as a workaround
# for https://twistedmatrix.com/trac/ticket/9534.
@ -773,7 +773,7 @@ class BodyExceededMaxSize(Exception):
class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
"""A protocol which immediately errors upon receiving data."""
transport = None # type: Optional[ITCPTransport]
transport: Optional[ITCPTransport] = None
def __init__(self, deferred: defer.Deferred):
self.deferred = deferred
@ -799,7 +799,7 @@ class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
"""A protocol which reads body to a stream, erroring if the body exceeds a maximum size."""
transport = None # type: Optional[ITCPTransport]
transport: Optional[ITCPTransport] = None
def __init__(
self, stream: ByteWriteable, deferred: defer.Deferred, max_size: Optional[int]

View file

@ -70,10 +70,8 @@ WELL_KNOWN_RETRY_ATTEMPTS = 3
logger = logging.getLogger(__name__)
_well_known_cache = TTLCache("well-known") # type: TTLCache[bytes, Optional[bytes]]
_had_valid_well_known_cache = TTLCache(
"had-valid-well-known"
) # type: TTLCache[bytes, bool]
_well_known_cache: TTLCache[bytes, Optional[bytes]] = TTLCache("well-known")
_had_valid_well_known_cache: TTLCache[bytes, bool] = TTLCache("had-valid-well-known")
@attr.s(slots=True, frozen=True)
@ -130,9 +128,10 @@ class WellKnownResolver:
# requests for the same server in parallel?
try:
with Measure(self._clock, "get_well_known"):
result, cache_period = await self._fetch_well_known(
server_name
) # type: Optional[bytes], float
result: Optional[bytes]
cache_period: float
result, cache_period = await self._fetch_well_known(server_name)
except _FetchWellKnownFailure as e:
if prev_result and e.temporary:

View file

@ -43,6 +43,7 @@ from twisted.internet import defer
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import IReactorTime
from twisted.internet.task import _EPSILON, Cooperator
from twisted.web.client import ResponseFailed
from twisted.web.http_headers import Headers
from twisted.web.iweb import IBodyProducer, IResponse
@ -105,7 +106,7 @@ class ByteParser(ByteWriteable, Generic[T], abc.ABC):
the parsed data.
"""
CONTENT_TYPE = abc.abstractproperty() # type: str # type: ignore
CONTENT_TYPE: str = abc.abstractproperty() # type: ignore
"""The expected content type of the response, e.g. `application/json`. If
the content type doesn't match we fail the request.
"""
@ -262,6 +263,15 @@ async def _handle_response(
request.uri.decode("ascii"),
)
raise RequestSendFailed(e, can_retry=True) from e
except ResponseFailed as e:
logger.warning(
"{%s} [%s] Failed to read response - %s %s",
request.txn_id,
request.destination,
request.method,
request.uri.decode("ascii"),
)
raise RequestSendFailed(e, can_retry=True) from e
except Exception as e:
logger.warning(
"{%s} [%s] Error reading response %s %s: %s",
@ -317,11 +327,11 @@ class MatrixFederationHttpClient:
# We need to use a DNS resolver which filters out blacklisted IP
# addresses, to prevent DNS rebinding.
self.reactor = BlacklistingReactorWrapper(
self.reactor: ISynapseReactor = BlacklistingReactorWrapper(
hs.get_reactor(),
hs.config.federation_ip_range_whitelist,
hs.config.federation_ip_range_blacklist,
) # type: ISynapseReactor
)
user_agent = hs.version_string
if hs.config.user_agent_suffix:
@ -494,7 +504,7 @@ class MatrixFederationHttpClient:
)
# Inject the span into the headers
headers_dict = {} # type: Dict[bytes, List[bytes]]
headers_dict: Dict[bytes, List[bytes]] = {}
opentracing.inject_header_dict(headers_dict, request.destination)
headers_dict[b"User-Agent"] = [self.version_string_bytes]
@ -523,9 +533,9 @@ class MatrixFederationHttpClient:
destination_bytes, method_bytes, url_to_sign_bytes, json
)
data = encode_canonical_json(json)
producer = QuieterFileBodyProducer(
producer: Optional[IBodyProducer] = QuieterFileBodyProducer(
BytesIO(data), cooperator=self._cooperator
) # type: Optional[IBodyProducer]
)
else:
producer = None
auth_headers = self.build_auth_headers(
@ -1137,6 +1147,24 @@ class MatrixFederationHttpClient:
msg,
)
raise SynapseError(502, msg, Codes.TOO_LARGE)
except defer.TimeoutError as e:
logger.warning(
"{%s} [%s] Timed out reading response - %s %s",
request.txn_id,
request.destination,
request.method,
request.uri.decode("ascii"),
)
raise RequestSendFailed(e, can_retry=True) from e
except ResponseFailed as e:
logger.warning(
"{%s} [%s] Failed to read response - %s %s",
request.txn_id,
request.destination,
request.method,
request.uri.decode("ascii"),
)
raise RequestSendFailed(e, can_retry=True) from e
except Exception as e:
logger.warning(
"{%s} [%s] Error reading response: %s",

View file

@ -117,7 +117,8 @@ class ProxyAgent(_AgentBase):
https_proxy = proxies["https"].encode() if "https" in proxies else None
no_proxy = proxies["no"] if "no" in proxies else None
# Parse credentials from https proxy connection string if present
# Parse credentials from http and https proxy connection string if present
self.http_proxy_creds, http_proxy = parse_username_password(http_proxy)
self.https_proxy_creds, https_proxy = parse_username_password(https_proxy)
self.http_proxy_endpoint = _http_proxy_endpoint(
@ -171,7 +172,7 @@ class ProxyAgent(_AgentBase):
"""
uri = uri.strip()
if not _VALID_URI.match(uri):
raise ValueError("Invalid URI {!r}".format(uri))
raise ValueError(f"Invalid URI {uri!r}")
parsed_uri = URI.fromBytes(uri)
pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
@ -189,6 +190,15 @@ class ProxyAgent(_AgentBase):
and self.http_proxy_endpoint
and not should_skip_proxy
):
# Determine whether we need to set Proxy-Authorization headers
if self.http_proxy_creds:
# Set a Proxy-Authorization header
if headers is None:
headers = Headers()
headers.addRawHeader(
b"Proxy-Authorization",
self.http_proxy_creds.as_proxy_authorization_value(),
)
# Cache *all* connections under the same key, since we are only
# connecting to a single destination, the proxy:
pool_key = ("http-proxy", self.http_proxy_endpoint)

View file

@ -81,7 +81,7 @@ def return_json_error(f: failure.Failure, request: SynapseRequest) -> None:
if f.check(SynapseError):
# mypy doesn't understand that f.check asserts the type.
exc = f.value # type: SynapseError # type: ignore
exc: SynapseError = f.value # type: ignore
error_code = exc.code
error_dict = exc.error_dict()
@ -132,7 +132,7 @@ def return_html_error(
"""
if f.check(CodeMessageException):
# mypy doesn't understand that f.check asserts the type.
cme = f.value # type: CodeMessageException # type: ignore
cme: CodeMessageException = f.value # type: ignore
code = cme.code
msg = cme.msg
@ -404,7 +404,7 @@ class JsonResource(DirectServeJsonResource):
key word arguments to pass to the callback
"""
# At this point the path must be bytes.
request_path_bytes = request.path # type: bytes # type: ignore
request_path_bytes: bytes = request.path # type: ignore
request_path = request_path_bytes.decode("ascii")
# Treat HEAD requests as GET requests.
request_method = request.method
@ -557,7 +557,7 @@ class _ByteProducer:
request: Request,
iterator: Iterator[bytes],
):
self._request = request # type: Optional[Request]
self._request: Optional[Request] = request
self._iterator = iterator
self._paused = False

View file

@ -205,7 +205,7 @@ def parse_string(
parameter is present, must be one of a list of allowed values and
is not one of those allowed values.
"""
args = request.args # type: Dict[bytes, List[bytes]] # type: ignore
args: Dict[bytes, List[bytes]] = request.args # type: ignore
return parse_string_from_args(
args,
name,

View file

@ -64,16 +64,16 @@ class SynapseRequest(Request):
def __init__(self, channel, *args, max_request_body_size=1024, **kw):
Request.__init__(self, channel, *args, **kw)
self._max_request_body_size = max_request_body_size
self.site = channel.site # type: SynapseSite
self.site: SynapseSite = channel.site
self._channel = channel # this is used by the tests
self.start_time = 0.0
# The requester, if authenticated. For federation requests this is the
# server name, for client requests this is the Requester object.
self._requester = None # type: Optional[Union[Requester, str]]
self._requester: Optional[Union[Requester, str]] = None
# we can't yet create the logcontext, as we don't know the method.
self.logcontext = None # type: Optional[LoggingContext]
self.logcontext: Optional[LoggingContext] = None
global _next_request_seq
self.request_seq = _next_request_seq
@ -152,7 +152,7 @@ class SynapseRequest(Request):
Returns:
The redacted URI as a string.
"""
uri = self.uri # type: Union[bytes, str]
uri: Union[bytes, str] = self.uri
if isinstance(uri, bytes):
uri = uri.decode("ascii", errors="replace")
return redact_uri(uri)
@ -167,7 +167,7 @@ class SynapseRequest(Request):
Returns:
The request method as a string.
"""
method = self.method # type: Union[bytes, str]
method: Union[bytes, str] = self.method
if isinstance(method, bytes):
return self.method.decode("ascii")
return method
@ -384,7 +384,7 @@ class SynapseRequest(Request):
# authenticated (e.g. and admin is puppetting a user) then we log both.
requester, authenticated_entity = self.get_authenticated_entity()
if authenticated_entity:
requester = "{}.{}".format(authenticated_entity, requester)
requester = f"{authenticated_entity}.{requester}"
self.site.access_logger.log(
log_level,
@ -434,8 +434,8 @@ class XForwardedForRequest(SynapseRequest):
"""
# the client IP and ssl flag, as extracted from the headers.
_forwarded_for = None # type: Optional[_XForwardedForAddress]
_forwarded_https = False # type: bool
_forwarded_for: "Optional[_XForwardedForAddress]" = None
_forwarded_https: bool = False
def requestReceived(self, command, path, version):
# this method is called by the Channel once the full request has been

View file

@ -110,9 +110,9 @@ class RemoteHandler(logging.Handler):
self.port = port
self.maximum_buffer = maximum_buffer
self._buffer = deque() # type: Deque[logging.LogRecord]
self._connection_waiter = None # type: Optional[Deferred]
self._producer = None # type: Optional[LogProducer]
self._buffer: Deque[logging.LogRecord] = deque()
self._connection_waiter: Optional[Deferred] = None
self._producer: Optional[LogProducer] = None
# Connect without DNS lookups if it's a direct IP.
if _reactor is None:
@ -123,9 +123,9 @@ class RemoteHandler(logging.Handler):
try:
ip = ip_address(self.host)
if isinstance(ip, IPv4Address):
endpoint = TCP4ClientEndpoint(
endpoint: IStreamClientEndpoint = TCP4ClientEndpoint(
_reactor, self.host, self.port
) # type: IStreamClientEndpoint
)
elif isinstance(ip, IPv6Address):
endpoint = TCP6ClientEndpoint(_reactor, self.host, self.port)
else:
@ -165,7 +165,7 @@ class RemoteHandler(logging.Handler):
def writer(result: Protocol) -> None:
# Force recognising transport as a Connection and not the more
# generic ITransport.
transport = result.transport # type: Connection # type: ignore
transport: Connection = result.transport # type: ignore
# We have a connection. If we already have a producer, and its
# transport is the same, just trigger a resumeProducing.
@ -188,7 +188,7 @@ class RemoteHandler(logging.Handler):
self._producer.resumeProducing()
self._connection_waiter = None
deferred = self._service.whenConnected(failAfterFailures=1) # type: Deferred
deferred: Deferred = self._service.whenConnected(failAfterFailures=1)
deferred.addCallbacks(writer, fail)
self._connection_waiter = deferred

View file

@ -63,7 +63,7 @@ def parse_drain_configs(
DrainType.CONSOLE_JSON,
DrainType.FILE_JSON,
):
formatter = "json" # type: Optional[str]
formatter: Optional[str] = "json"
elif logging_type in (
DrainType.CONSOLE_JSON_TERSE,
DrainType.NETWORK_JSON_TERSE,

View file

@ -113,13 +113,13 @@ class ContextResourceUsage:
self.reset()
else:
# FIXME: mypy can't infer the types set via reset() above, so specify explicitly for now
self.ru_utime = copy_from.ru_utime # type: float
self.ru_stime = copy_from.ru_stime # type: float
self.db_txn_count = copy_from.db_txn_count # type: int
self.ru_utime: float = copy_from.ru_utime
self.ru_stime: float = copy_from.ru_stime
self.db_txn_count: int = copy_from.db_txn_count
self.db_txn_duration_sec = copy_from.db_txn_duration_sec # type: float
self.db_sched_duration_sec = copy_from.db_sched_duration_sec # type: float
self.evt_db_fetch_count = copy_from.evt_db_fetch_count # type: int
self.db_txn_duration_sec: float = copy_from.db_txn_duration_sec
self.db_sched_duration_sec: float = copy_from.db_sched_duration_sec
self.evt_db_fetch_count: int = copy_from.evt_db_fetch_count
def copy(self) -> "ContextResourceUsage":
return ContextResourceUsage(copy_from=self)
@ -289,12 +289,12 @@ class LoggingContext:
# The thread resource usage when the logcontext became active. None
# if the context is not currently active.
self.usage_start = None # type: Optional[resource._RUsage]
self.usage_start: Optional[resource._RUsage] = None
self.main_thread = get_thread_id()
self.request = None
self.tag = ""
self.scope = None # type: Optional[_LogContextScope]
self.scope: Optional["_LogContextScope"] = None
# keep track of whether we have hit the __exit__ block for this context
# (suggesting that the the thing that created the context thinks it should

View file

@ -251,7 +251,7 @@ try:
except Exception:
logger.exception("Failed to report span")
RustReporter = _WrappedRustReporter # type: Optional[Type[_WrappedRustReporter]]
RustReporter: Optional[Type[_WrappedRustReporter]] = _WrappedRustReporter
except ImportError:
RustReporter = None
@ -286,7 +286,7 @@ class SynapseBaggage:
# Block everything by default
# A regex which matches the server_names to expose traces for.
# None means 'block everything'.
_homeserver_whitelist = None # type: Optional[Pattern[str]]
_homeserver_whitelist: Optional[Pattern[str]] = None
# Util methods
@ -374,7 +374,7 @@ def init_tracer(hs: "HomeServer"):
config = JaegerConfig(
config=hs.config.jaeger_config,
service_name="{} {}".format(hs.config.server_name, hs.get_instance_name()),
service_name=f"{hs.config.server_name} {hs.get_instance_name()}",
scope_manager=LogContextScopeManager(hs.config),
metrics_factory=PrometheusMetricsFactory(),
)
@ -662,7 +662,7 @@ def inject_header_dict(
span = opentracing.tracer.active_span
carrier = {} # type: Dict[str, str]
carrier: Dict[str, str] = {}
opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier)
for key, value in carrier.items():
@ -704,7 +704,7 @@ def get_active_span_text_map(destination=None):
if destination and not whitelisted_homeserver(destination):
return {}
carrier = {} # type: Dict[str, str]
carrier: Dict[str, str] = {}
opentracing.tracer.inject(
opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier
)
@ -718,7 +718,7 @@ def active_span_context_as_string():
Returns:
The active span context encoded as a string.
"""
carrier = {} # type: Dict[str, str]
carrier: Dict[str, str] = {}
if opentracing:
opentracing.tracer.inject(
opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier

View file

@ -46,7 +46,7 @@ logger = logging.getLogger(__name__)
METRICS_PREFIX = "/_synapse/metrics"
running_on_pypy = platform.python_implementation() == "PyPy"
all_gauges = {} # type: Dict[str, Union[LaterGauge, InFlightGauge]]
all_gauges: "Dict[str, Union[LaterGauge, InFlightGauge]]" = {}
HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
@ -130,7 +130,7 @@ class InFlightGauge:
)
# Counts number of in flight blocks for a given set of label values
self._registrations = {} # type: Dict
self._registrations: Dict = {}
# Protects access to _registrations
self._lock = threading.Lock()
@ -248,7 +248,7 @@ class GaugeBucketCollector:
# We initially set this to None. We won't report metrics until
# this has been initialised after a successful data update
self._metric = None # type: Optional[GaugeHistogramMetricFamily]
self._metric: Optional[GaugeHistogramMetricFamily] = None
registry.register(self)

View file

@ -34,7 +34,7 @@ from twisted.web.resource import Resource
from synapse.util import caches
CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8")
CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
INF = float("inf")
@ -55,8 +55,8 @@ def floatToGoString(d):
# Go switches to exponents sooner than Python.
# We only need to care about positive values for le/quantile.
if d > 0 and dot > 6:
mantissa = "{0}.{1}{2}".format(s[0], s[1:dot], s[dot + 1 :]).rstrip("0.")
return "{0}e+0{1}".format(mantissa, dot - 1)
mantissa = f"{s[0]}.{s[1:dot]}{s[dot + 1 :]}".rstrip("0.")
return f"{mantissa}e+0{dot - 1}"
return s
@ -65,7 +65,7 @@ def sample_line(line, name):
labelstr = "{{{0}}}".format(
",".join(
[
'{0}="{1}"'.format(
'{}="{}"'.format(
k,
v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
)
@ -78,10 +78,8 @@ def sample_line(line, name):
timestamp = ""
if line.timestamp is not None:
# Convert to milliseconds.
timestamp = " {0:d}".format(int(float(line.timestamp) * 1000))
return "{0}{1} {2}{3}\n".format(
name, labelstr, floatToGoString(line.value), timestamp
)
timestamp = f" {int(float(line.timestamp) * 1000):d}"
return "{}{} {}{}\n".format(name, labelstr, floatToGoString(line.value), timestamp)
def generate_latest(registry, emit_help=False):
@ -118,14 +116,14 @@ def generate_latest(registry, emit_help=False):
# Output in the old format for compatibility.
if emit_help:
output.append(
"# HELP {0} {1}\n".format(
"# HELP {} {}\n".format(
mname,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append("# TYPE {0} {1}\n".format(mname, mtype))
output.append(f"# TYPE {mname} {mtype}\n")
om_samples = {} # type: Dict[str, List[str]]
om_samples: Dict[str, List[str]] = {}
for s in metric.samples:
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == metric.name + suffix:
@ -143,13 +141,13 @@ def generate_latest(registry, emit_help=False):
for suffix, lines in sorted(om_samples.items()):
if emit_help:
output.append(
"# HELP {0}{1} {2}\n".format(
"# HELP {}{} {}\n".format(
metric.name,
suffix,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append("# TYPE {0}{1} gauge\n".format(metric.name, suffix))
output.append(f"# TYPE {metric.name}{suffix} gauge\n")
output.extend(lines)
# Get rid of the weird colon things while we're at it
@ -163,12 +161,12 @@ def generate_latest(registry, emit_help=False):
# Also output in the new format, if it's different.
if emit_help:
output.append(
"# HELP {0} {1}\n".format(
"# HELP {} {}\n".format(
mnewname,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append("# TYPE {0} {1}\n".format(mnewname, mtype))
output.append(f"# TYPE {mnewname} {mtype}\n")
for s in metric.samples:
# Get rid of the OpenMetrics specific samples (we should already have

View file

@ -93,7 +93,7 @@ _background_process_db_sched_duration = Counter(
# map from description to a counter, so that we can name our logcontexts
# incrementally. (It actually duplicates _background_process_start_count, but
# it's much simpler to do so than to try to combine them.)
_background_process_counts = {} # type: Dict[str, int]
_background_process_counts: Dict[str, int] = {}
# Set of all running background processes that became active active since the
# last time metrics were scraped (i.e. background processes that performed some
@ -103,7 +103,7 @@ _background_process_counts = {} # type: Dict[str, int]
# background processes stacking up behind a lock or linearizer, where we then
# only need to iterate over and update metrics for the process that have
# actually been active and can ignore the idle ones.
_background_processes_active_since_last_scrape = set() # type: Set[_BackgroundProcess]
_background_processes_active_since_last_scrape: "Set[_BackgroundProcess]" = set()
# A lock that covers the above set and dict
_bg_metrics_lock = threading.Lock()
@ -137,8 +137,7 @@ class _Collector:
_background_process_db_txn_duration,
_background_process_db_sched_duration,
):
for r in m.collect():
yield r
yield from m.collect()
REGISTRY.register(_Collector())

View file

@ -12,18 +12,42 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import email.utils
import logging
from typing import TYPE_CHECKING, Any, Generator, Iterable, List, Optional, Tuple
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
)
import jinja2
from twisted.internet import defer
from twisted.web.resource import IResource
from synapse.events import EventBase
from synapse.http.client import SimpleHttpClient
from synapse.http.server import (
DirectServeHtmlResource,
DirectServeJsonResource,
respond_with_html,
)
from synapse.http.servlet import parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.roommember import ProfileInfo
from synapse.storage.state import StateFilter
from synapse.types import JsonDict, UserID, create_requester
from synapse.types import JsonDict, Requester, UserID, create_requester
from synapse.util import Clock
from synapse.util.caches.descriptors import cached
if TYPE_CHECKING:
from synapse.server import HomeServer
@ -33,7 +57,20 @@ This package defines the 'stable' API which can be used by extension modules whi
are loaded into Synapse.
"""
__all__ = ["errors", "make_deferred_yieldable", "run_in_background", "ModuleApi"]
__all__ = [
"errors",
"make_deferred_yieldable",
"parse_json_object_from_request",
"respond_with_html",
"run_in_background",
"cached",
"UserID",
"DatabasePool",
"LoggingTransaction",
"DirectServeHtmlResource",
"DirectServeJsonResource",
"ModuleApi",
]
logger = logging.getLogger(__name__)
@ -52,12 +89,28 @@ class ModuleApi:
self._server_name = hs.hostname
self._presence_stream = hs.get_event_sources().sources["presence"]
self._state = hs.get_state_handler()
self._clock: Clock = hs.get_clock()
self._send_email_handler = hs.get_send_email_handler()
try:
app_name = self._hs.config.email_app_name
self._from_string = self._hs.config.email_notif_from % {"app": app_name}
except (KeyError, TypeError):
# If substitution failed (which can happen if the string contains
# placeholders other than just "app", or if the type of the placeholder is
# not a string), fall back to the bare strings.
self._from_string = self._hs.config.email_notif_from
self._raw_from = email.utils.parseaddr(self._from_string)[1]
# We expose these as properties below in order to attach a helpful docstring.
self._http_client = hs.get_simple_http_client() # type: SimpleHttpClient
self._http_client: SimpleHttpClient = hs.get_simple_http_client()
self._public_room_list_manager = PublicRoomListManager(hs)
self._spam_checker = hs.get_spam_checker()
self._account_validity_handler = hs.get_account_validity_handler()
self._third_party_event_rules = hs.get_third_party_event_rules()
#################################################################################
# The following methods should only be called during the module's initialisation.
@ -67,6 +120,16 @@ class ModuleApi:
"""Registers callbacks for spam checking capabilities."""
return self._spam_checker.register_callbacks
@property
def register_account_validity_callbacks(self):
"""Registers callbacks for account validity capabilities."""
return self._account_validity_handler.register_account_validity_callbacks
@property
def register_third_party_rules_callbacks(self):
"""Registers callbacks for third party event rules capabilities."""
return self._third_party_event_rules.register_third_party_rules_callbacks
def register_web_resource(self, path: str, resource: IResource):
"""Registers a web resource to be served at the given path.
@ -101,22 +164,56 @@ class ModuleApi:
"""
return self._public_room_list_manager
def get_user_by_req(self, req, allow_guest=False):
@property
def public_baseurl(self) -> str:
"""The configured public base URL for this homeserver."""
return self._hs.config.public_baseurl
@property
def email_app_name(self) -> str:
"""The application name configured in the homeserver's configuration."""
return self._hs.config.email.email_app_name
async def get_user_by_req(
self,
req: SynapseRequest,
allow_guest: bool = False,
allow_expired: bool = False,
) -> Requester:
"""Check the access_token provided for a request
Args:
req (twisted.web.server.Request): Incoming HTTP request
allow_guest (bool): True if guest users should be allowed. If this
req: Incoming HTTP request
allow_guest: True if guest users should be allowed. If this
is False, and the access token is for a guest user, an
AuthError will be thrown
allow_expired: True if expired users should be allowed. If this
is False, and the access token is for an expired user, an
AuthError will be thrown
Returns:
twisted.internet.defer.Deferred[synapse.types.Requester]:
the requester for this request
The requester for this request
Raises:
synapse.api.errors.AuthError: if no user by that token exists,
InvalidClientCredentialsError: if no user by that token exists,
or the token is invalid.
"""
return self._auth.get_user_by_req(req, allow_guest)
return await self._auth.get_user_by_req(
req,
allow_guest,
allow_expired=allow_expired,
)
async def is_user_admin(self, user_id: str) -> bool:
"""Checks if a user is a server admin.
Args:
user_id: The Matrix ID of the user to check.
Returns:
True if the user is a server admin, False otherwise.
"""
return await self._store.is_server_admin(UserID.from_string(user_id))
def get_qualified_user_id(self, username):
"""Qualify a user id, if necessary
@ -134,6 +231,32 @@ class ModuleApi:
return username
return UserID(username, self._hs.hostname).to_string()
async def get_profile_for_user(self, localpart: str) -> ProfileInfo:
"""Look up the profile info for the user with the given localpart.
Args:
localpart: The localpart to look up profile information for.
Returns:
The profile information (i.e. display name and avatar URL).
"""
return await self._store.get_profileinfo(localpart)
async def get_threepids_for_user(self, user_id: str) -> List[Dict[str, str]]:
"""Look up the threepids (email addresses and phone numbers) associated with the
given Matrix user ID.
Args:
user_id: The Matrix user ID to look up threepids for.
Returns:
A list of threepids, each threepid being represented by a dictionary
containing a "medium" key which value is "email" for email addresses and
"msisdn" for phone numbers, and an "address" key which value is the
threepid's address.
"""
return await self._store.user_get_threepids(user_id)
def check_user_exists(self, user_id):
"""Check if user exists.
@ -464,6 +587,88 @@ class ModuleApi:
presence_events, destination
)
def looping_background_call(
self,
f: Callable,
msec: float,
*args,
desc: Optional[str] = None,
**kwargs,
):
"""Wraps a function as a background process and calls it repeatedly.
Waits `msec` initially before calling `f` for the first time.
Args:
f: The function to call repeatedly. f can be either synchronous or
asynchronous, and must follow Synapse's logcontext rules.
More info about logcontexts is available at
https://matrix-org.github.io/synapse/latest/log_contexts.html
msec: How long to wait between calls in milliseconds.
*args: Positional arguments to pass to function.
desc: The background task's description. Default to the function's name.
**kwargs: Key arguments to pass to function.
"""
if desc is None:
desc = f.__name__
if self._hs.config.run_background_tasks:
self._clock.looping_call(
run_as_background_process,
msec,
desc,
f,
*args,
**kwargs,
)
else:
logger.warning(
"Not running looping call %s as the configuration forbids it",
f,
)
async def send_mail(
self,
recipient: str,
subject: str,
html: str,
text: str,
):
"""Send an email on behalf of the homeserver.
Args:
recipient: The email address for the recipient.
subject: The email's subject.
html: The email's HTML content.
text: The email's text content.
"""
await self._send_email_handler.send_email(
email_address=recipient,
subject=subject,
app_name=self.email_app_name,
html=html,
text=text,
)
def read_templates(
self,
filenames: List[str],
custom_template_directory: Optional[str] = None,
) -> List[jinja2.Template]:
"""Read and load the content of the template files at the given location.
By default, Synapse will look for these templates in its configured template
directory, but another directory to search in can be provided.
Args:
filenames: The name of the template files to look for.
custom_template_directory: An additional directory to look for the files in.
Returns:
A list containing the loaded templates, with the orders matching the one of
the filenames parameter.
"""
return self._hs.config.read_templates(filenames, custom_template_directory)
class PublicRoomListManager:
"""Contains methods for adding to, removing from and querying whether a room

View file

@ -14,5 +14,9 @@
"""Exception types which are exposed as part of the stable module API"""
from synapse.api.errors import RedirectException, SynapseError # noqa: F401
from synapse.api.errors import ( # noqa: F401
InvalidClientCredentialsError,
RedirectException,
SynapseError,
)
from synapse.config._base import ConfigError # noqa: F401

View file

@ -203,21 +203,21 @@ class Notifier:
UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000
def __init__(self, hs: "synapse.server.HomeServer"):
self.user_to_user_stream = {} # type: Dict[str, _NotifierUserStream]
self.room_to_user_streams = {} # type: Dict[str, Set[_NotifierUserStream]]
self.user_to_user_stream: Dict[str, _NotifierUserStream] = {}
self.room_to_user_streams: Dict[str, Set[_NotifierUserStream]] = {}
self.hs = hs
self.storage = hs.get_storage()
self.event_sources = hs.get_event_sources()
self.store = hs.get_datastore()
self.pending_new_room_events = [] # type: List[_PendingRoomEventEntry]
self.pending_new_room_events: List[_PendingRoomEventEntry] = []
# Called when there are new things to stream over replication
self.replication_callbacks = [] # type: List[Callable[[], None]]
self.replication_callbacks: List[Callable[[], None]] = []
# Called when remote servers have come back online after having been
# down.
self.remote_server_up_callbacks = [] # type: List[Callable[[str], None]]
self.remote_server_up_callbacks: List[Callable[[str], None]] = []
self.clock = hs.get_clock()
self.appservice_handler = hs.get_application_service_handler()
@ -237,7 +237,7 @@ class Notifier:
# when rendering the metrics page, which is likely once per minute at
# most when scraping it.
def count_listeners():
all_user_streams = set() # type: Set[_NotifierUserStream]
all_user_streams: Set[_NotifierUserStream] = set()
for streams in list(self.room_to_user_streams.values()):
all_user_streams |= streams
@ -329,8 +329,8 @@ class Notifier:
pending = self.pending_new_room_events
self.pending_new_room_events = []
users = set() # type: Set[UserID]
rooms = set() # type: Set[str]
users: Set[UserID] = set()
rooms: Set[str] = set()
for entry in pending:
if entry.event_pos.persisted_after(max_room_stream_token):
@ -580,7 +580,7 @@ class Notifier:
if after_token == before_token:
return EventStreamResult([], (from_token, from_token))
events = [] # type: List[EventBase]
events: List[EventBase] = []
end_token = from_token
for name, source in self.event_sources.sources.items():

View file

@ -194,7 +194,7 @@ class BulkPushRuleEvaluator:
count_as_unread = _should_count_as_unread(event, context)
rules_by_user = await self._get_rules_for_event(event, context)
actions_by_user = {} # type: Dict[str, List[Union[dict, str]]]
actions_by_user: Dict[str, List[Union[dict, str]]] = {}
room_members = await self.store.get_joined_users_from_context(event, context)
@ -207,7 +207,7 @@ class BulkPushRuleEvaluator:
event, len(room_members), sender_power_level, power_levels
)
condition_cache = {} # type: Dict[str, bool]
condition_cache: Dict[str, bool] = {}
# If the event is not a state event check if any users ignore the sender.
if not event.is_state():

View file

@ -26,10 +26,10 @@ def format_push_rules_for_user(user: UserID, ruleslist) -> Dict[str, Dict[str, l
# We're going to be mutating this a lot, so do a deep copy
ruleslist = copy.deepcopy(ruleslist)
rules = {
rules: Dict[str, Dict[str, List[Dict[str, Any]]]] = {
"global": {},
"device": {},
} # type: Dict[str, Dict[str, List[Dict[str, Any]]]]
}
rules["global"] = _add_empty_priority_class_arrays(rules["global"])

View file

@ -66,8 +66,8 @@ class EmailPusher(Pusher):
self.store = self.hs.get_datastore()
self.email = pusher_config.pushkey
self.timed_call = None # type: Optional[IDelayedCall]
self.throttle_params = {} # type: Dict[str, ThrottleParams]
self.timed_call: Optional[IDelayedCall] = None
self.throttle_params: Dict[str, ThrottleParams] = {}
self._inited = False
self._is_processing = False
@ -168,7 +168,7 @@ class EmailPusher(Pusher):
)
)
soonest_due_at = None # type: Optional[int]
soonest_due_at: Optional[int] = None
if not unprocessed:
await self.save_last_stream_ordering_and_success(self.max_stream_ordering)

View file

@ -71,7 +71,7 @@ class HttpPusher(Pusher):
self.data = pusher_config.data
self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
self.failing_since = pusher_config.failing_since
self.timed_call = None # type: Optional[IDelayedCall]
self.timed_call: Optional[IDelayedCall] = None
self._is_processing = False
self._group_unread_count_by_room = hs.config.push_group_unread_count_by_room
self._pusherpool = hs.get_pusherpool()

View file

@ -110,7 +110,7 @@ class Mailer:
self.state_handler = self.hs.get_state_handler()
self.storage = hs.get_storage()
self.app_name = app_name
self.email_subjects = hs.config.email_subjects # type: EmailSubjectConfig
self.email_subjects: EmailSubjectConfig = hs.config.email_subjects
logger.info("Created Mailer for app_name %s" % app_name)
@ -230,7 +230,7 @@ class Mailer:
[pa["event_id"] for pa in push_actions]
)
notifs_by_room = {} # type: Dict[str, List[Dict[str, Any]]]
notifs_by_room: Dict[str, List[Dict[str, Any]]] = {}
for pa in push_actions:
notifs_by_room.setdefault(pa["room_id"], []).append(pa)
@ -356,13 +356,13 @@ class Mailer:
room_name = await calculate_room_name(self.store, room_state_ids, user_id)
room_vars = {
room_vars: Dict[str, Any] = {
"title": room_name,
"hash": string_ordinal_total(room_id), # See sender avatar hash
"notifs": [],
"invite": is_invite,
"link": self._make_room_link(room_id),
} # type: Dict[str, Any]
}
if not is_invite:
for n in notifs:
@ -460,9 +460,9 @@ class Mailer:
type_state_key = ("m.room.member", event.sender)
sender_state_event_id = room_state_ids.get(type_state_key)
if sender_state_event_id:
sender_state_event = await self.store.get_event(
sender_state_event: Optional[EventBase] = await self.store.get_event(
sender_state_event_id
) # type: Optional[EventBase]
)
else:
# Attempt to check the historical state for the room.
historical_state = await self.state_store.get_state_for_event(

View file

@ -199,7 +199,7 @@ def name_from_member_event(member_event: EventBase) -> str:
def _state_as_two_level_dict(state: StateMap[str]) -> Dict[str, Dict[str, str]]:
ret = {} # type: Dict[str, Dict[str, str]]
ret: Dict[str, Dict[str, str]] = {}
for k, v in state.items():
ret.setdefault(k[0], {})[k[1]] = v
return ret

View file

@ -195,9 +195,9 @@ class PushRuleEvaluatorForEvent:
# Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches
regex_cache = LruCache(
regex_cache: LruCache[Tuple[str, bool, bool], Pattern] = LruCache(
50000, "regex_push_cache"
) # type: LruCache[Tuple[str, bool, bool], Pattern]
)
def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool:

View file

@ -31,13 +31,13 @@ class PusherFactory:
self.hs = hs
self.config = hs.config
self.pusher_types = {
self.pusher_types: Dict[str, Callable[[HomeServer, PusherConfig], Pusher]] = {
"http": HttpPusher
} # type: Dict[str, Callable[[HomeServer, PusherConfig], Pusher]]
}
logger.info("email enable notifs: %r", hs.config.email_enable_notifs)
if hs.config.email_enable_notifs:
self.mailers = {} # type: Dict[str, Mailer]
self.mailers: Dict[str, Mailer] = {}
self._notif_template_html = hs.config.email_notif_template_html
self._notif_template_text = hs.config.email_notif_template_text

View file

@ -62,10 +62,6 @@ class PusherPool:
self.store = self.hs.get_datastore()
self.clock = self.hs.get_clock()
self._account_validity_enabled = (
hs.config.account_validity.account_validity_enabled
)
# We shard the handling of push notifications by user ID.
self._pusher_shard_config = hs.config.push.pusher_shard_config
self._instance_name = hs.get_instance_name()
@ -87,7 +83,9 @@ class PusherPool:
self._last_room_stream_id_seen = self.store.get_room_max_stream_ordering()
# map from user id to app_id:pushkey to pusher
self.pushers = {} # type: Dict[str, Dict[str, Pusher]]
self.pushers: Dict[str, Dict[str, Pusher]] = {}
self._account_validity_handler = hs.get_account_validity_handler()
def start(self) -> None:
"""Starts the pushers off in a background process."""
@ -238,12 +236,9 @@ class PusherPool:
for u in users_affected:
# Don't push if the user account has expired
if self._account_validity_enabled:
expired = await self.store.is_account_expired(
u, self.clock.time_msec()
)
if expired:
continue
expired = await self._account_validity_handler.is_user_expired(u)
if expired:
continue
if u in self.pushers:
for p in self.pushers[u].values():
@ -268,12 +263,9 @@ class PusherPool:
for u in users_affected:
# Don't push if the user account has expired
if self._account_validity_enabled:
expired = await self.store.is_account_expired(
u, self.clock.time_msec()
)
if expired:
continue
expired = await self._account_validity_handler.is_user_expired(u)
if expired:
continue
if u in self.pushers:
for p in self.pushers[u].values():

View file

@ -115,7 +115,7 @@ CONDITIONAL_REQUIREMENTS = {
"cache_memory": ["pympler"],
}
ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str]
ALL_OPTIONAL_REQUIREMENTS: Set[str] = set()
for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
# Exclude systemd as it's a system-based requirement.
@ -193,7 +193,7 @@ def check_requirements(for_feature=None):
if not for_feature:
# Check the optional dependencies are up to date. We allow them to not be
# installed.
OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), []) # type: List[str]
OPTS: List[str] = sum(CONDITIONAL_REQUIREMENTS.values(), [])
for dependency in OPTS:
try:

Some files were not shown because too many files have changed in this diff Show more